zephyr/kernel/mempool.c
Andrew Boie aa6de29c4b lib: user mode compatible mempools
We would like to offer the capability to have memory pool heap data
structures that are usable from user mode threads. The current
k_mem_pool implementation uses IRQ locking and system-wide membership
lists that make it incompatible with user mode constraints.

However, much of the existing memory pool code can be abstracted to some
common functions that are used by both k_mem_pool and the new
sys_mem_pool implementations.

The sys_mem_pool implementation has the following differences:

* The alloc/free APIs work directly with pointers, no internal memory
block structures are exposed to the end user. A pointer to the source
pool is provided for allocation, but freeing memory just requires the
pointer and nothing else.

* k_mem_pool uses IRQ locks and requires very fine-grained locking in
order to not affect system latency. sys_mem_pools just use a semaphore
to protect the pool data structures at the API level, since there are
no implications for system responsiveness with this kind of concurrency
control.

* sys_mem_pools do not support the notion of timeouts for requesting
memory.

* sys_mem_pools are specified at compile time with macros, just like
kernel memory pools. Alternative forms of specification at runtime
will be a later enhancement.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
2018-04-05 07:03:05 -07:00

188 lines
3.9 KiB
C

/*
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
#include <string.h>
#include <misc/__assert.h>
#include <kswap.h>
/* Linker-defined symbols bound the static pool structs */
extern struct k_mem_pool _k_mem_pool_list_start[];
extern struct k_mem_pool _k_mem_pool_list_end[];
s64_t _tick_get(void);
/* Map a pool id (an index) back to its statically-defined pool struct */
static struct k_mem_pool *get_pool(int id)
{
	return _k_mem_pool_list_start + id;
}
/* Index of a pool within the linker-generated static pool array;
 * inverse of get_pool()
 */
static int pool_id(struct k_mem_pool *pool)
{
	return pool - _k_mem_pool_list_start;
}
/* Initialize one kernel pool: empty out its wait queue, then run the
 * common (k_mem_pool/sys_mem_pool shared) base initialization.
 */
static void k_mem_pool_init(struct k_mem_pool *p)
{
	sys_dlist_init(&p->wait_q);
	_sys_mem_pool_base_init(&p->base);
}
/* SYS_INIT hook: initialize every statically-defined k_mem_pool placed
 * in the linker-bounded list by K_MEM_POOL_DEFINE().
 */
int init_static_pools(struct device *unused)
{
	struct k_mem_pool *pool = _k_mem_pool_list_start;

	ARG_UNUSED(unused);

	while (pool < _k_mem_pool_list_end) {
		k_mem_pool_init(pool);
		pool++;
	}

	return 0;
}
SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
/**
 * Allocate a memory block from a pool, optionally waiting for one to
 * become available.
 *
 * @param p       Pool to allocate from
 * @param block   Output: filled in with the block data pointer and its
 *                pool/level/block id (id is written even on failure)
 * @param size    Requested size in bytes
 * @param timeout K_NO_WAIT, K_FOREVER, or a wait limit in milliseconds
 *
 * @return 0 on success, -EAGAIN if the wait timed out, or another
 *         negative errno from the underlying allocator
 */
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;

	/* Blocking inside an ISR is never legal */
	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		/* Absolute tick deadline for the whole call, so repeated
		 * sleep/retry cycles don't extend the total wait
		 */
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		u32_t level_num, block_num;

		ret = _sys_mem_pool_block_alloc(&p->base, size, &level_num,
						&block_num, &block->data);
		block->id.pool = pool_id(p);
		block->id.level = level_num;
		block->id.block = block_num;

		/* Return on success, when we may not wait, or on any error
		 * other than -ENOMEM; only -ENOMEM means "might succeed
		 * later once someone frees a block", so only then do we
		 * pend and retry
		 */
		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		/* Sleep on the pool's wait queue until a free wakes us */
		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			/* Recompute the remaining budget; give up once the
			 * deadline has passed
			 */
			timeout = end - _tick_get();
			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}
/**
 * Return a block to its pool, identified by the pool/level/block id
 * that k_mem_pool_alloc() filled in, then wake all waiters on that
 * pool so they can retry their allocations.
 *
 * @param id Block descriptor id previously produced by allocation
 */
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
	int key, need_sched = 0;
	struct k_mem_pool *p = get_pool(id->pool);

	_sys_mem_pool_block_free(&p->base, id->level, id->block);

	/* Wake up anyone blocked on this pool and let them repeat
	 * their allocation attempts
	 */
	key = irq_lock();

	/* Drain the entire wait queue: every waiter is readied, since any
	 * of them might now fit in the freed (possibly merged) block
	 */
	while (!sys_dlist_is_empty(&p->wait_q)) {
		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);

		_unpend_thread(th);
		_abort_thread_timeout(th);
		_ready_thread(th);
		need_sched = 1;
	}

	if (need_sched && !_is_in_isr()) {
		/* Threads were readied: let the scheduler run (this also
		 * releases the IRQ lock)
		 */
		_reschedule_threads(key);
	} else {
		irq_unlock(key);
	}
}
/**
 * Convenience wrapper: free a block given its full descriptor rather
 * than just its id.
 *
 * @param block Block descriptor returned by k_mem_pool_alloc()
 */
void k_mem_pool_free(struct k_mem_block *block)
{
	struct k_mem_block_id *id = &block->id;

	k_mem_pool_free_id(id);
}
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/*
* Heap is defined using HEAP_MEM_POOL_SIZE configuration option.
*
* This module defines the heap memory pool and the _HEAP_MEM_POOL symbol
* that has the address of the associated memory pool struct.
*/
K_MEM_POOL_DEFINE(_heap_mem_pool, 64, CONFIG_HEAP_MEM_POOL_SIZE, 1, 4);
#define _HEAP_MEM_POOL (&_heap_mem_pool)
/**
 * Allocate memory from the kernel heap pool, malloc()-style.
 *
 * A hidden struct k_mem_block_id is stored at the start of the
 * underlying pool block so that k_free() can locate the owning pool
 * from nothing but the returned pointer.
 *
 * @param size Number of usable bytes requested
 * @return Pointer to the usable area, or NULL on failure (including a
 *         request so large that adding the hidden descriptor would
 *         overflow size_t)
 */
void *k_malloc(size_t size)
{
	struct k_mem_block block;

	/*
	 * get a block large enough to hold an initial (hidden) block
	 * descriptor, as well as the space the caller requested
	 */
	size += sizeof(struct k_mem_block_id);
	if (size < sizeof(struct k_mem_block_id)) {
		/* size_t addition wrapped around: request is too large,
		 * and proceeding would allocate an undersized block
		 */
		return NULL;
	}

	if (k_mem_pool_alloc(_HEAP_MEM_POOL, &block, size, K_NO_WAIT) != 0) {
		return NULL;
	}

	/* save the block descriptor info at the start of the actual block */
	memcpy(block.data, &block.id, sizeof(struct k_mem_block_id));

	/* return address of the user area part of the block to the caller */
	return (char *)block.data + sizeof(struct k_mem_block_id);
}
/**
 * Free memory previously obtained from k_malloc()/k_calloc().
 * A NULL pointer is silently ignored.
 *
 * @param ptr Pointer returned by k_malloc(), or NULL
 */
void k_free(void *ptr)
{
	struct k_mem_block_id *id;

	if (ptr == NULL) {
		return;
	}

	/* The hidden block descriptor sits immediately before the user
	 * area; step back to it and hand it to the pool layer
	 */
	id = (struct k_mem_block_id *)((char *)ptr - sizeof(*id));
	k_mem_pool_free_id(id);
}
/**
 * Allocate and zero-fill an array from the kernel heap, calloc()-style.
 *
 * The nmemb * size multiplication is checked for overflow
 * unconditionally. Previously the check only existed under
 * CONFIG_ASSERT (and relied on the __ASSERT argument's side effect to
 * compute the total); with asserts disabled, an overflowing request
 * silently wrapped and returned an undersized allocation. An
 * overflowing request now fails with NULL instead.
 *
 * @param nmemb Number of elements
 * @param size  Size of each element in bytes
 * @return Zero-initialized memory, or NULL on failure or overflow
 */
void *k_calloc(size_t nmemb, size_t size)
{
	void *ret;
	size_t bounds;

	if (__builtin_mul_overflow(nmemb, size, &bounds)) {
		/* requested size overflows size_t */
		return NULL;
	}

	ret = k_malloc(bounds);
	if (ret) {
		memset(ret, 0, bounds);
	}

	return ret;
}
#endif