kernel: Make thread resource pools into sys_heaps
The k_mem_pool allocator is no more, and the z_mem_pool compatibility API is going away. The internal allocator should be a k_heap always. Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
9413922625
commit
c770cab1a3
|
@ -405,7 +405,7 @@ struct k_thread {
|
|||
void *switch_handle;
|
||||
#endif
|
||||
/** resource pool */
|
||||
struct k_mem_pool *resource_pool;
|
||||
struct k_heap *resource_pool;
|
||||
|
||||
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
|
||||
/* Pointer to arch-specific TLS area */
|
||||
|
@ -652,13 +652,19 @@ extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
|
|||
* previous pool.
|
||||
*
|
||||
* @param thread Target thread to assign a memory pool for resource requests.
|
||||
* @param pool Memory pool to use for resources,
|
||||
* @param heap Heap object to use for resources,
|
||||
* or NULL if the thread should no longer have a memory pool.
|
||||
*/
|
||||
static inline void k_thread_resource_pool_assign(struct k_thread *thread,
|
||||
static inline void k_thread_heap_assign(struct k_thread *thread,
|
||||
struct k_heap *heap)
|
||||
{
|
||||
thread->resource_pool = heap;
|
||||
}
|
||||
|
||||
static inline void z_thread_resource_pool_assign(struct k_thread *thread,
|
||||
struct k_mem_pool *pool)
|
||||
{
|
||||
thread->resource_pool = pool;
|
||||
k_thread_heap_assign(thread, pool ? pool->heap : NULL);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
|
||||
|
@ -690,7 +696,7 @@ __syscall int k_thread_stack_space_get(const struct k_thread *thread,
|
|||
/**
|
||||
* @brief Assign the system heap as a thread's resource pool
|
||||
*
|
||||
* Similar to k_thread_resource_pool_assign(), but the thread will use
|
||||
* Similar to z_thread_resource_pool_assign(), but the thread will use
|
||||
* the kernel heap to draw memory.
|
||||
*
|
||||
* Use with caution, as a malicious thread could perform DoS attacks on the
|
||||
|
|
|
@ -54,20 +54,12 @@ void k_free(void *ptr)
|
|||
|
||||
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
|
||||
|
||||
/*
|
||||
* Heap is defined using HEAP_MEM_POOL_SIZE configuration option.
|
||||
*
|
||||
* This module defines the heap memory pool and the _HEAP_MEM_POOL symbol
|
||||
* that has the address of the associated memory pool struct.
|
||||
*/
|
||||
|
||||
Z_MEM_POOL_DEFINE(_heap_mem_pool, CONFIG_HEAP_MEM_POOL_MIN_SIZE,
|
||||
CONFIG_HEAP_MEM_POOL_SIZE, 1, 4);
|
||||
#define _HEAP_MEM_POOL (&_heap_mem_pool)
|
||||
K_HEAP_DEFINE(_system_heap, CONFIG_HEAP_MEM_POOL_SIZE);
|
||||
#define _SYSTEM_HEAP (&_system_heap)
|
||||
|
||||
void *k_malloc(size_t size)
|
||||
{
|
||||
return z_mem_pool_malloc(_HEAP_MEM_POOL, size);
|
||||
return z_heap_malloc(_SYSTEM_HEAP, size);
|
||||
}
|
||||
|
||||
void *k_calloc(size_t nmemb, size_t size)
|
||||
|
@ -88,25 +80,25 @@ void *k_calloc(size_t nmemb, size_t size)
|
|||
|
||||
void k_thread_system_pool_assign(struct k_thread *thread)
|
||||
{
|
||||
thread->resource_pool = _HEAP_MEM_POOL;
|
||||
thread->resource_pool = _SYSTEM_HEAP;
|
||||
}
|
||||
#else
|
||||
#define _HEAP_MEM_POOL NULL
|
||||
#define _SYSTEM_HEAP NULL
|
||||
#endif
|
||||
|
||||
void *z_thread_malloc(size_t size)
|
||||
{
|
||||
void *ret;
|
||||
struct k_mem_pool *pool;
|
||||
struct k_heap *heap;
|
||||
|
||||
if (k_is_in_isr()) {
|
||||
pool = _HEAP_MEM_POOL;
|
||||
heap = _SYSTEM_HEAP;
|
||||
} else {
|
||||
pool = _current->resource_pool;
|
||||
heap = _current->resource_pool;
|
||||
}
|
||||
|
||||
if (pool) {
|
||||
ret = z_mem_pool_malloc(pool, size);
|
||||
if (heap) {
|
||||
ret = z_heap_malloc(heap, size);
|
||||
} else {
|
||||
ret = NULL;
|
||||
}
|
||||
|
|
|
@ -534,7 +534,7 @@ void main(void)
|
|||
|
||||
k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
|
||||
k_mem_domain_add_thread(&app_domain, app_thread);
|
||||
k_thread_resource_pool_assign(app_thread, &app_mem_pool);
|
||||
z_thread_resource_pool_assign(app_thread, &app_mem_pool);
|
||||
|
||||
k_thread_start(app_thread);
|
||||
k_thread_join(app_thread, K_FOREVER);
|
||||
|
|
|
@ -213,7 +213,7 @@ void app_a_entry(void *p1, void *p2, void *p3)
|
|||
/* Assign a resource pool to serve for kernel-side allocations on
|
||||
* behalf of application A. Needed for k_queue_alloc_append().
|
||||
*/
|
||||
k_thread_resource_pool_assign(k_current_get(), &app_a_resource_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &app_a_resource_pool);
|
||||
|
||||
/* Set the callback function for the sample driver. This has to be
|
||||
* done from supervisor mode, as this code will run in supervisor
|
||||
|
|
|
@ -86,7 +86,7 @@ void app_b_entry(void *p1, void *p2, void *p3)
|
|||
/* Assign a resource pool to serve for kernel-side allocations on
|
||||
* behalf of application B. Needed for k_queue_alloc_append().
|
||||
*/
|
||||
k_thread_resource_pool_assign(k_current_get(), &app_b_resource_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &app_b_resource_pool);
|
||||
|
||||
/* We are about to drop to user mode and become the monitor thread.
|
||||
* Grant ourselves access to the kernel objects we need for
|
||||
|
|
|
@ -167,7 +167,7 @@ void parent_handler(void *p1, void *p2, void *p3)
|
|||
*
|
||||
* @ingroup kernel_memprotect_tests
|
||||
*
|
||||
* @see k_thread_resource_pool_assign()
|
||||
* @see z_thread_resource_pool_assign()
|
||||
*/
|
||||
void test_inherit_resource_pool(void)
|
||||
{
|
||||
|
@ -177,7 +177,7 @@ void test_inherit_resource_pool(void)
|
|||
parent_handler,
|
||||
NULL, NULL, NULL,
|
||||
PRIORITY, 0, K_NO_WAIT);
|
||||
k_thread_resource_pool_assign(&parent_thr, &res_pool);
|
||||
z_thread_resource_pool_assign(&parent_thr, &res_pool);
|
||||
k_sem_take(&sync_sem, K_FOREVER);
|
||||
zassert_true(parent_res_pool_ptr == child_res_pool_ptr,
|
||||
"Resource pool of the parent thread not inherited,"
|
||||
|
|
|
@ -420,7 +420,7 @@ void test_main(void)
|
|||
{
|
||||
sprintf(kernel_string, "this is a kernel string");
|
||||
sprintf(user_string, "this is a user string");
|
||||
k_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
|
||||
ztest_test_suite(syscalls,
|
||||
ztest_unit_test(test_string_nlen),
|
||||
|
|
|
@ -64,7 +64,7 @@ void test_main(void)
|
|||
k_thread_access_grant(k_current_get(), &kmsgq, &msgq, &end_sema,
|
||||
&tdata, &tstack);
|
||||
|
||||
k_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
|
||||
ztest_test_suite(msgq_api,
|
||||
ztest_1cpu_unit_test(test_msgq_thread),
|
||||
|
|
|
@ -65,7 +65,7 @@ void test_main(void)
|
|||
&kpipe, &end_sema, &tdata, &tstack,
|
||||
&khalfpipe, &put_get_pipe);
|
||||
|
||||
k_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
|
||||
ztest_test_suite(pipe_api,
|
||||
ztest_1cpu_unit_test(test_pipe_thread2thread),
|
||||
|
|
|
@ -27,7 +27,7 @@ void test_main(void)
|
|||
{
|
||||
test_poll_grant_access();
|
||||
|
||||
k_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
|
||||
ztest_test_suite(poll_api,
|
||||
ztest_1cpu_user_unit_test(test_poll_no_wait),
|
||||
|
|
|
@ -30,7 +30,7 @@ Z_MEM_POOL_DEFINE(test_pool, 16, MAX_SZ, 4, 4);
|
|||
/*test case main entry*/
|
||||
void test_main(void)
|
||||
{
|
||||
k_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
|
||||
ztest_test_suite(queue_api,
|
||||
ztest_1cpu_unit_test(test_queue_supv_to_user),
|
||||
|
|
|
@ -260,7 +260,7 @@ void test_queue_get_2threads(void)
|
|||
|
||||
static void tqueue_alloc(struct k_queue *pqueue)
|
||||
{
|
||||
k_thread_resource_pool_assign(k_current_get(), NULL);
|
||||
z_thread_resource_pool_assign(k_current_get(), NULL);
|
||||
|
||||
/* Alloc append without resource pool */
|
||||
k_queue_alloc_append(pqueue, (void *)&data_append);
|
||||
|
@ -269,7 +269,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
|
|||
zassert_false(k_queue_remove(pqueue, &data_append), NULL);
|
||||
|
||||
/* Assign resource pool of lower size */
|
||||
k_thread_resource_pool_assign(k_current_get(), &mem_pool_fail);
|
||||
z_thread_resource_pool_assign(k_current_get(), &mem_pool_fail);
|
||||
|
||||
/* Prepend to the queue, but fails because of
|
||||
* insufficient memory
|
||||
|
@ -284,7 +284,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
|
|||
zassert_true(k_queue_is_empty(pqueue), NULL);
|
||||
|
||||
/* Assign resource pool of sufficient size */
|
||||
k_thread_resource_pool_assign(k_current_get(),
|
||||
z_thread_resource_pool_assign(k_current_get(),
|
||||
&mem_pool_pass);
|
||||
|
||||
zassert_false(k_queue_alloc_prepend(pqueue, (void *)&data_prepend),
|
||||
|
@ -301,7 +301,7 @@ static void tqueue_alloc(struct k_queue *pqueue)
|
|||
* @brief Test queue alloc append and prepend
|
||||
* @ingroup kernel_queue_tests
|
||||
* @see k_queue_alloc_append(), k_queue_alloc_prepend(),
|
||||
* k_thread_resource_pool_assign(), k_queue_is_empty(),
|
||||
* z_thread_resource_pool_assign(), k_queue_is_empty(),
|
||||
* k_queue_get(), k_queue_remove()
|
||||
*/
|
||||
void test_queue_alloc(void)
|
||||
|
|
|
@ -335,7 +335,7 @@ void test_main(void)
|
|||
&end_sema, &threadstack, &kstack, &stack, &thread_data1,
|
||||
&end_sema1, &threadstack1);
|
||||
|
||||
k_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
z_thread_resource_pool_assign(k_current_get(), &test_pool);
|
||||
|
||||
ztest_test_suite(test_stack_usage,
|
||||
ztest_unit_test(test_stack_thread2thread),
|
||||
|
|
Loading…
Reference in a new issue