kernel: Make thread resource pools into sys_heaps

The k_mem_pool allocator is no more, and the z_mem_pool compatibility
API is going away.  The internal allocator should always be a k_heap.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross 2020-10-02 08:22:03 -07:00 committed by Anas Nashif
parent 9413922625
commit c770cab1a3
13 changed files with 36 additions and 38 deletions
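
For context, a minimal sketch of the new assignment API introduced here (the heap name, size, and helper function are illustrative, not part of the commit):

#include <zephyr.h>

K_HEAP_DEFINE(app_heap, 4096);

static void configure_worker(struct k_thread *worker)
{
	/* Kernel-side allocations made on behalf of this thread
	 * (z_thread_malloc(), k_queue_alloc_append(), ...) will now be
	 * served from app_heap rather than from a k_mem_pool.
	 */
	k_thread_heap_assign(worker, &app_heap);
}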

@@ -405,7 +405,7 @@ struct k_thread {
 	void *switch_handle;
 #endif
 	/** resource pool */
-	struct k_mem_pool *resource_pool;
+	struct k_heap *resource_pool;

 #if defined(CONFIG_THREAD_LOCAL_STORAGE)
 	/* Pointer to arch-specific TLS area */
@@ -652,13 +652,19 @@ extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
  * previous pool.
  *
  * @param thread Target thread to assign a memory pool for resource requests.
- * @param pool Memory pool to use for resources,
+ * @param heap Heap object to use for resources,
  *             or NULL if the thread should no longer have a memory pool.
  */
-static inline void k_thread_resource_pool_assign(struct k_thread *thread,
+static inline void k_thread_heap_assign(struct k_thread *thread,
+					struct k_heap *heap)
+{
+	thread->resource_pool = heap;
+}
+
+static inline void z_thread_resource_pool_assign(struct k_thread *thread,
 						 struct k_mem_pool *pool)
 {
-	thread->resource_pool = pool;
+	k_thread_heap_assign(thread, pool ? pool->heap : NULL);
 }

 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
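
For existing call sites the z_ shim keeps the old signature, so migration is a one-line change; roughly (assuming legacy_pool is a struct k_mem_pool whose heap member points at its backing k_heap, as the shim above relies on):

	/* before: legacy wrapper taking a struct k_mem_pool */
	z_thread_resource_pool_assign(thread, &legacy_pool);

	/* after: assign the underlying k_heap directly */
	k_thread_heap_assign(thread, legacy_pool.heap);
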
@@ -690,7 +696,7 @@ __syscall int k_thread_stack_space_get(const struct k_thread *thread,
 /**
  * @brief Assign the system heap as a thread's resource pool
  *
- * Similar to k_thread_resource_pool_assign(), but the thread will use
+ * Similar to z_thread_resource_pool_assign(), but the thread will use
  * the kernel heap to draw memory.
  *
  * Use with caution, as a malicious thread could perform DoS attacks on the

@@ -54,20 +54,12 @@ void k_free(void *ptr)

 #if (CONFIG_HEAP_MEM_POOL_SIZE > 0)

-/*
- * Heap is defined using HEAP_MEM_POOL_SIZE configuration option.
- *
- * This module defines the heap memory pool and the _HEAP_MEM_POOL symbol
- * that has the address of the associated memory pool struct.
- */
-Z_MEM_POOL_DEFINE(_heap_mem_pool, CONFIG_HEAP_MEM_POOL_MIN_SIZE,
-		  CONFIG_HEAP_MEM_POOL_SIZE, 1, 4);
-#define _HEAP_MEM_POOL (&_heap_mem_pool)
+K_HEAP_DEFINE(_system_heap, CONFIG_HEAP_MEM_POOL_SIZE);
+#define _SYSTEM_HEAP (&_system_heap)

 void *k_malloc(size_t size)
 {
-	return z_mem_pool_malloc(_HEAP_MEM_POOL, size);
+	return z_heap_malloc(_SYSTEM_HEAP, size);
 }

 void *k_calloc(size_t nmemb, size_t size)
@@ -88,25 +80,25 @@ void *k_calloc(size_t nmemb, size_t size)

 void k_thread_system_pool_assign(struct k_thread *thread)
 {
-	thread->resource_pool = _HEAP_MEM_POOL;
+	thread->resource_pool = _SYSTEM_HEAP;
 }
 #else
-#define _HEAP_MEM_POOL NULL
+#define _SYSTEM_HEAP NULL
 #endif

 void *z_thread_malloc(size_t size)
 {
 	void *ret;
-	struct k_mem_pool *pool;
+	struct k_heap *heap;

 	if (k_is_in_isr()) {
-		pool = _HEAP_MEM_POOL;
+		heap = _SYSTEM_HEAP;
 	} else {
-		pool = _current->resource_pool;
+		heap = _current->resource_pool;
 	}

-	if (pool) {
-		ret = z_mem_pool_malloc(pool, size);
+	if (heap) {
+		ret = z_heap_malloc(heap, size);
 	} else {
 		ret = NULL;
 	}
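
The system heap is now just a k_heap defined with K_HEAP_DEFINE(), and the same k_heap API is available to application-defined heaps. A minimal sketch of direct k_heap use (heap name and size are illustrative):

#include <zephyr.h>

K_HEAP_DEFINE(scratch_heap, 2048);

void heap_demo(void)
{
	/* K_NO_WAIT: fail immediately instead of waiting for space to free up */
	void *buf = k_heap_alloc(&scratch_heap, 128, K_NO_WAIT);

	if (buf != NULL) {
		k_heap_free(&scratch_heap, buf);
	}
}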

@@ -534,7 +534,7 @@ void main(void)
 	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
 	k_mem_domain_add_thread(&app_domain, app_thread);

-	k_thread_resource_pool_assign(app_thread, &app_mem_pool);
+	z_thread_resource_pool_assign(app_thread, &app_mem_pool);

 	k_thread_start(app_thread);
 	k_thread_join(app_thread, K_FOREVER);

@@ -213,7 +213,7 @@ void app_a_entry(void *p1, void *p2, void *p3)
 	/* Assign a resource pool to serve for kernel-side allocations on
 	 * behalf of application A. Needed for k_queue_alloc_append().
 	 */
-	k_thread_resource_pool_assign(k_current_get(), &app_a_resource_pool);
+	z_thread_resource_pool_assign(k_current_get(), &app_a_resource_pool);

 	/* Set the callback function for the sample driver. This has to be
 	 * done from supervisor mode, as this code will run in supervisor

@@ -86,7 +86,7 @@ void app_b_entry(void *p1, void *p2, void *p3)
 	/* Assign a resource pool to serve for kernel-side allocations on
 	 * behalf of application A. Needed for k_queue_alloc_append().
 	 */
-	k_thread_resource_pool_assign(k_current_get(), &app_b_resource_pool);
+	z_thread_resource_pool_assign(k_current_get(), &app_b_resource_pool);

 	/* We are about to drop to user mode and become the monitor thread.
 	 * Grant ourselves access to the kernel objects we need for

@@ -167,7 +167,7 @@ void parent_handler(void *p1, void *p2, void *p3)
  *
  * @ingroup kernel_memprotect_tests
  *
- * @see k_thread_resource_pool_assign()
+ * @see z_thread_resource_pool_assign()
  */
 void test_inherit_resource_pool(void)
 {
@@ -177,7 +177,7 @@ void test_inherit_resource_pool(void)
 			parent_handler,
 			NULL, NULL, NULL,
 			PRIORITY, 0, K_NO_WAIT);
-	k_thread_resource_pool_assign(&parent_thr, &res_pool);
+	z_thread_resource_pool_assign(&parent_thr, &res_pool);
 	k_sem_take(&sync_sem, K_FOREVER);
 	zassert_true(parent_res_pool_ptr == child_res_pool_ptr,
 		     "Resource pool of the parent thread not inherited,"

@@ -420,7 +420,7 @@ void test_main(void)
 {
 	sprintf(kernel_string, "this is a kernel string");
 	sprintf(user_string, "this is a user string");
-	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+	z_thread_resource_pool_assign(k_current_get(), &test_pool);

 	ztest_test_suite(syscalls,
 			 ztest_unit_test(test_string_nlen),

@@ -64,7 +64,7 @@ void test_main(void)
 	k_thread_access_grant(k_current_get(), &kmsgq, &msgq, &end_sema,
 			      &tdata, &tstack);

-	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+	z_thread_resource_pool_assign(k_current_get(), &test_pool);

 	ztest_test_suite(msgq_api,
 			 ztest_1cpu_unit_test(test_msgq_thread),

@@ -65,7 +65,7 @@ void test_main(void)
 			      &kpipe, &end_sema, &tdata, &tstack,
 			      &khalfpipe, &put_get_pipe);

-	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+	z_thread_resource_pool_assign(k_current_get(), &test_pool);

 	ztest_test_suite(pipe_api,
 			 ztest_1cpu_unit_test(test_pipe_thread2thread),

@@ -27,7 +27,7 @@ void test_main(void)
 {
 	test_poll_grant_access();

-	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+	z_thread_resource_pool_assign(k_current_get(), &test_pool);

 	ztest_test_suite(poll_api,
 			 ztest_1cpu_user_unit_test(test_poll_no_wait),

@@ -30,7 +30,7 @@ Z_MEM_POOL_DEFINE(test_pool, 16, MAX_SZ, 4, 4);
 /*test case main entry*/
 void test_main(void)
 {
-	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+	z_thread_resource_pool_assign(k_current_get(), &test_pool);

 	ztest_test_suite(queue_api,
 			 ztest_1cpu_unit_test(test_queue_supv_to_user),

@@ -260,7 +260,7 @@ void test_queue_get_2threads(void)

 static void tqueue_alloc(struct k_queue *pqueue)
 {
-	k_thread_resource_pool_assign(k_current_get(), NULL);
+	z_thread_resource_pool_assign(k_current_get(), NULL);

 	/* Alloc append without resource pool */
 	k_queue_alloc_append(pqueue, (void *)&data_append);
@@ -269,7 +269,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
 	zassert_false(k_queue_remove(pqueue, &data_append), NULL);

 	/* Assign resource pool of lower size */
-	k_thread_resource_pool_assign(k_current_get(), &mem_pool_fail);
+	z_thread_resource_pool_assign(k_current_get(), &mem_pool_fail);

 	/* Prepend to the queue, but fails because of
 	 * insufficient memory
@@ -284,7 +284,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
 	zassert_true(k_queue_is_empty(pqueue), NULL);

 	/* Assign resource pool of sufficient size */
-	k_thread_resource_pool_assign(k_current_get(),
+	z_thread_resource_pool_assign(k_current_get(),
 				      &mem_pool_pass);

 	zassert_false(k_queue_alloc_prepend(pqueue, (void *)&data_prepend),
@@ -301,7 +301,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
  * @brief Test queue alloc append and prepend
  * @ingroup kernel_queue_tests
  * @see k_queue_alloc_append(), k_queue_alloc_prepend(),
- * k_thread_resource_pool_assign(), k_queue_is_empty(),
+ * z_thread_resource_pool_assign(), k_queue_is_empty(),
  * k_queue_get(), k_queue_remove()
  */
 void test_queue_alloc(void)
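
The flow this test exercises maps directly onto the new API: the allocating queue calls draw from whatever heap is assigned to the calling thread, and with no heap assigned z_thread_malloc() returns NULL and the call is expected to fail. A rough sketch (names and sizes illustrative, not part of this commit):

#include <zephyr.h>

K_HEAP_DEFINE(queue_heap, 1024);

static struct k_queue demo_queue;
static int demo_data;

void queue_alloc_demo(void)
{
	k_queue_init(&demo_queue);

	/* No resource pool assigned: the allocating append is expected to fail */
	k_thread_heap_assign(k_current_get(), NULL);
	int ret = k_queue_alloc_append(&demo_queue, &demo_data);

	/* With a heap assigned the same call can succeed */
	k_thread_heap_assign(k_current_get(), &queue_heap);
	ret = k_queue_alloc_append(&demo_queue, &demo_data);
	(void)ret;
}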

@@ -335,7 +335,7 @@ void test_main(void)
 			      &end_sema, &threadstack, &kstack, &stack, &thread_data1,
 			      &end_sema1, &threadstack1);

-	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+	z_thread_resource_pool_assign(k_current_get(), &test_pool);

 	ztest_test_suite(test_stack_usage,
 			 ztest_unit_test(test_stack_thread2thread),