8606fabf74
There was a somewhat promiscuous pattern in the kernel where IPC mechanisms would do something that might affect the current thread choice, then check _must_switch_threads() (or occasionally __must_switch_threads -- don't ask, the distinction is being replaced by real English words), sometimes _is_in_isr() (but not always, even in contexts where that looks like it would be a mistake), and then call _Swap() if everything was OK, otherwise releasing the irq_lock(). Sometimes this was done directly, sometimes via the inverted test, sometimes (poll, heh) by doing the test when the thread state was modified and then needlessly passing the result up the call stack to the point of the _Swap().

And some places were just calling _reschedule_threads(), which did all this already.

Unify all this madness. The old _reschedule_threads() function has been split into two variants: _reschedule_yield() and _reschedule_noyield(). The latter is the "normal" one that respects the cooperative priority of the current thread (i.e. it won't switch out even if there is a higher-priority thread ready -- the current thread has to pend itself first); the former is used in the handful of places where code was doing a swap unconditionally, just to preserve precise behavior across the refactor. I'm not at all convinced it should exist...

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
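For illustration only (not part of the commit): a minimal sketch of the old open-coded pattern next to its _reschedule_noyield() replacement, using the helper names from the message above. The thread and key variables and the surrounding context are hypothetical.

	/* Old pattern (sketch): make a thread ready, then open-code the
	 * "should we context switch?" decision.
	 */
	key = irq_lock();
	_ready_thread(thread);
	if (_must_switch_threads() && !_is_in_isr()) {
		_Swap(key);		/* switch to the new thread now */
	} else {
		irq_unlock(key);	/* nothing better to run; just unlock */
	}

	/* New pattern (sketch): the same decision folded into one call,
	 * which either swaps or releases the lock as appropriate.
	 */
	key = irq_lock();
	_ready_thread(thread);
	_reschedule_noyield(key);
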
189 lines
3.9 KiB
C
/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
#include <string.h>
#include <misc/__assert.h>
#include <kswap.h>

/* Linker-defined symbols bound the static pool structs */
extern struct k_mem_pool _k_mem_pool_list_start[];
extern struct k_mem_pool _k_mem_pool_list_end[];

s64_t _tick_get(void);

static struct k_mem_pool *get_pool(int id)
{
	return &_k_mem_pool_list_start[id];
}

static int pool_id(struct k_mem_pool *pool)
{
	return pool - &_k_mem_pool_list_start[0];
}

static void k_mem_pool_init(struct k_mem_pool *p)
{
	sys_dlist_init(&p->wait_q);
	_sys_mem_pool_base_init(&p->base);
}

int init_static_pools(struct device *unused)
{
	ARG_UNUSED(unused);
	struct k_mem_pool *p;

	for (p = _k_mem_pool_list_start; p < _k_mem_pool_list_end; p++) {
		k_mem_pool_init(p);
	}

	return 0;
}

SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;

	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		u32_t level_num, block_num;

		ret = _sys_mem_pool_block_alloc(&p->base, size, &level_num,
						&block_num, &block->data);
		block->id.pool = pool_id(p);
		block->id.level = level_num;
		block->id.block = block_num;

		/* Done on success, on a non-blocking call, or on any
		 * failure other than -ENOMEM (pool exhausted).
		 */
		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		/* Pool exhausted: pend on the pool's wait queue until a
		 * block is freed or the timeout expires, then retry.
		 */
		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			timeout = end - _tick_get();

			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}

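/* Illustrative caller-side sketch (editorial annotation, not part of
 * mempool.c): shows the timeout semantics of k_mem_pool_alloc() as
 * implemented above.  The pool name "my_pool", its geometry, and the
 * request sizes are hypothetical.
 */
K_MEM_POOL_DEFINE(my_pool, 64, 1024, 1, 4);

static int alloc_example(void)
{
	struct k_mem_block blk;

	/* Non-blocking attempt: returns immediately if no block is free */
	if (k_mem_pool_alloc(&my_pool, &blk, 256, K_NO_WAIT) == 0) {
		k_mem_pool_free(&blk);
	}

	/* Blocking attempt: wait up to 100 ms for memory to be freed */
	if (k_mem_pool_alloc(&my_pool, &blk, 256, 100) != 0) {
		return -EAGAIN;	/* timed out */
	}
	k_mem_pool_free(&blk);
	return 0;
}
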
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
	int key, need_sched = 0;
	struct k_mem_pool *p = get_pool(id->pool);

	_sys_mem_pool_block_free(&p->base, id->level, id->block);

	/* Wake up anyone blocked on this pool and let them repeat
	 * their allocation attempts
	 */
	key = irq_lock();

	while (!sys_dlist_is_empty(&p->wait_q)) {
		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);

		_unpend_thread(th);
		_abort_thread_timeout(th);
		_ready_thread(th);
		need_sched = 1;
	}

	if (need_sched && !_is_in_isr()) {
		_reschedule_noyield(key);
	} else {
		irq_unlock(key);
	}
}

void k_mem_pool_free(struct k_mem_block *block)
{
	k_mem_pool_free_id(&block->id);
}

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)

/*
 * Heap is defined using HEAP_MEM_POOL_SIZE configuration option.
 *
 * This module defines the heap memory pool and the _HEAP_MEM_POOL symbol
 * that has the address of the associated memory pool struct.
 */

K_MEM_POOL_DEFINE(_heap_mem_pool, 64, CONFIG_HEAP_MEM_POOL_SIZE, 1, 4);
#define _HEAP_MEM_POOL (&_heap_mem_pool)

void *k_malloc(size_t size)
{
	struct k_mem_block block;

	/*
	 * get a block large enough to hold an initial (hidden) block
	 * descriptor, as well as the space the caller requested
	 */
	if (__builtin_add_overflow(size, sizeof(struct k_mem_block_id),
				   &size)) {
		return NULL;
	}
	if (k_mem_pool_alloc(_HEAP_MEM_POOL, &block, size, K_NO_WAIT) != 0) {
		return NULL;
	}

	/* save the block descriptor info at the start of the actual block */
	memcpy(block.data, &block.id, sizeof(struct k_mem_block_id));

	/* return address of the user area part of the block to the caller */
	return (char *)block.data + sizeof(struct k_mem_block_id);
}

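/* Worked example of the hidden-descriptor scheme used by k_malloc() above
 * (editorial annotation, not part of mempool.c): assuming
 * sizeof(struct k_mem_block_id) is 4, k_malloc(100) asks the heap pool for
 * a 104-byte block, copies the block ID into bytes 0..3 of that block, and
 * returns block.data + 4 to the caller.  k_free() below subtracts the same
 * 4 bytes to recover the ID before handing it to k_mem_pool_free_id().
 */
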
void k_free(void *ptr)
{
	if (ptr != NULL) {
		/* point to hidden block descriptor at start of block */
		ptr = (char *)ptr - sizeof(struct k_mem_block_id);

		/* return block to the heap memory pool */
		k_mem_pool_free_id(ptr);
	}
}

void *k_calloc(size_t nmemb, size_t size)
{
	void *ret;
	size_t bounds;

	if (__builtin_mul_overflow(nmemb, size, &bounds)) {
		return NULL;
	}

	ret = k_malloc(bounds);
	if (ret) {
		memset(ret, 0, bounds);
	}
	return ret;
}
#endif