kernel: Remove z_mem_pool wrapper internals
These implemented a k_mem_pool in terms of the now-universal k_heap
utility. That's no longer necessary now that the k_mem_pool API has
been removed.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent 9028ba5e27
commit 3c2c1d85b0
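For reference, code that previously went through the removed z_mem_pool wrappers calls the underlying k_heap API directly. A minimal sketch; the heap name, the 1 KiB size, and the 100 ms timeout are illustrative, not from this commit:

#include <zephyr.h>

/* Statically define a 1 KiB kernel heap (replaces Z_MEM_POOL_DEFINE()) */
K_HEAP_DEFINE(my_heap, 1024);

void example(void)
{
        /* Block up to 100 ms for 64 bytes; returns NULL on timeout
         * instead of the legacy wrapper's -ENOMEM/-EAGAIN mapping
         * shown in the kheap.c hunk below.
         */
        void *mem = k_heap_alloc(&my_heap, 64, K_MSEC(100));

        if (mem != NULL) {
                k_heap_free(&my_heap, mem);
        }
}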
@@ -661,12 +661,6 @@ static inline void k_thread_heap_assign(struct k_thread *thread,
 	thread->resource_pool = heap;
 }
 
-static inline void z_thread_resource_pool_assign(struct k_thread *thread,
-						 struct k_mem_pool *pool)
-{
-	k_thread_heap_assign(thread, pool ? pool->heap : NULL);
-}
-
 #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
 /**
  * @brief Obtain stack usage information for the specified thread
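The deleted inline was a pure pass-through, so the migration for any remaining caller is mechanical. A sketch, with hypothetical thread and pool names:

/* Before: the wrapper unboxed the pool's backing heap */
z_thread_resource_pool_assign(&my_thread, &my_pool);

/* After: hand the k_heap to the thread directly */
k_thread_heap_assign(&my_thread, my_pool.heap);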
@@ -27,29 +27,4 @@ struct k_mem_block {
 };
 };
 
-struct k_mem_pool {
-	struct k_heap *heap;
-};
-
-/* Sizing is a heuristic, as k_mem_pool made promises about layout
- * that k_heap does not.  We make space for the number of maximum-
- * size objects defined, and include extra so there's enough metadata
- * space available for the maximum number of minimum-sized objects to
- * be stored: 8 bytes for each desired chunk header, and a 15 word
- * block to reserve room for a "typical" set of bucket list heads and
- * the heap footer (this size was picked more to conform with existing
- * test expectations than any rigorous theory -- we have tests that
- * rely on being able to allocate the blocks promised and ones that
- * make assumptions about when memory will run out).
- */
-#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)	\
-	K_HEAP_DEFINE(poolheap_##name,				\
-		      ((maxsz) * (nmax))			\
-		      + 8 * ((maxsz) * (nmax) / (minsz))	\
-		      + 15 * sizeof(void *));			\
-	struct k_mem_pool name = {				\
-		.heap = &poolheap_##name			\
-	}
-
-
 #endif /* ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_ */
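To make the removed sizing heuristic concrete: a hypothetical Z_MEM_POOL_DEFINE(p, 64, 256, 4, 4), i.e. a 64-byte minimum block, 256-byte maximum block, and four maximum-size blocks, would have sized its backing heap as:

  (256 * 4)              = 1024 bytes of block payload
+ 8 * ((256 * 4) / 64)   =  128 bytes of chunk headers (8 bytes for each of 16 minimum-size chunks)
+ 15 * sizeof(void *)    =   60 bytes on a 32-bit target (bucket list heads and heap footer)
                           ----
                           1212 bytes passed to K_HEAP_DEFINE()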
@@ -63,28 +63,3 @@ void k_heap_free(struct k_heap *h, void *mem)
 		k_spin_unlock(&h->lock, key);
 	}
 }
-
-/* Compatibility layer for legacy k_mem_pool code on top of a k_heap
- * backend.
- */
-
-int z_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
-		     size_t size, k_timeout_t timeout)
-{
-	block->id.heap = p->heap;
-	block->data = k_heap_alloc(p->heap, size, timeout);
-
-	/* The legacy API returns -EAGAIN on timeout expiration, but
-	 * -ENOMEM if the timeout was K_NO_WAIT.  Don't ask.
-	 */
-	if (size != 0 && block->data == NULL) {
-		return K_TIMEOUT_EQ(timeout, K_NO_WAIT) ? -ENOMEM : -EAGAIN;
-	} else {
-		return 0;
-	}
-}
-
-void z_mem_pool_free_id(struct k_mem_block_id *id)
-{
-	k_heap_free(id->heap, id->data);
-}
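The caller-visible effect of that return-code mapping, sketched with a hypothetical pool and oversized request (the codes come straight from the deleted branch above):

struct k_mem_block block;

/* Exhausted pool, no wait: the legacy wrapper returned -ENOMEM */
int rc1 = z_mem_pool_alloc(&pool, &block, 4096, K_NO_WAIT);

/* Exhausted pool, 100 ms wait: the wrapper returned -EAGAIN on expiry */
int rc2 = z_mem_pool_alloc(&pool, &block, 4096, K_MSEC(100));

/* The underlying k_heap API signals both failures the same way: NULL */
void *mem = k_heap_alloc(pool.heap, 4096, K_MSEC(100));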
@@ -8,11 +8,6 @@
 #include <string.h>
 #include <sys/math_extras.h>
 
-void z_mem_pool_free(struct k_mem_block *block)
-{
-	z_mem_pool_free_id(&block->id);
-}
-
 void *z_heap_malloc(struct k_heap *heap, size_t size)
 {
 	/*
@@ -34,12 +29,6 @@ void *z_heap_malloc(struct k_heap *heap, size_t size)
 
 	/* return address of the user area part of the block to the caller */
 	return (char *)&blk[1];
 }
-
-void *z_mem_pool_malloc(struct k_mem_pool *pool, size_t size)
-{
-	return z_heap_malloc(pool->heap, size);
-}
-
 
 void k_free(void *ptr)
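z_heap_malloc(), which survives this commit, is what lets k_free() work without a pool or heap handle: it stashes the owning heap in a hidden word ahead of the user area (hence the `return (char *)&blk[1];` in the context above). A minimal sketch of that hidden-header pattern, assuming the usual layout; names are illustrative, not the verbatim kernel code:

void *heap_malloc(struct k_heap *heap, size_t size)
{
        /* Over-allocate by one pointer and record the owning heap */
        struct k_heap **blk = k_heap_alloc(heap, size + sizeof(*blk),
                                           K_NO_WAIT);

        if (blk == NULL) {
                return NULL;
        }
        blk[0] = heap;
        return &blk[1];    /* user area begins after the hidden word */
}

void heap_free(void *ptr)
{
        if (ptr != NULL) {
                struct k_heap **blk = (struct k_heap **)ptr - 1;

                /* Recover the heap from the header, free the whole block */
                k_heap_free(blk[0], blk);
        }
}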