lib/os/heap: Add sys_heap_aligned_alloc()
Add support for a C11-style aligned_alloc() in the heap
implementation.  This is properly optimized, in the sense that unused
prefix/suffix data around the chosen allocation is returned to the
heap and made available for general allocation.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 1f29dd3251
commit ed258e9c6f
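For context, a minimal usage sketch of the new call (the backing buffer and sizes are illustrative, not from the commit; sys_heap_init(), sys_heap_alloc() and sys_heap_free() are the existing API this change extends):

	static char heap_mem[2048];
	static struct sys_heap heap;

	void example(void)
	{
		sys_heap_init(&heap, heap_mem, sizeof(heap_mem));

		/* Same contract as sys_heap_alloc(), plus a power-of-two
		 * alignment guarantee on the returned address.
		 */
		void *p = sys_heap_aligned_alloc(&heap, 64, 100);

		if (p != NULL) {
			/* ((uintptr_t)p % 64) == 0 holds here */
			sys_heap_free(&heap, p);
		}
	}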
@@ -88,6 +88,21 @@ void sys_heap_init(struct sys_heap *h, void *mem, size_t bytes);
  */
 void *sys_heap_alloc(struct sys_heap *h, size_t bytes);
 
+/** @brief Allocate aligned memory from a sys_heap
+ *
+ * Behaves in all ways like sys_heap_alloc(), except that the returned
+ * memory (if available) will have a starting address in memory which
+ * is a multiple of the specified power-of-two alignment value in
+ * bytes.  The resulting memory can be returned to the heap using
+ * sys_heap_free().
+ *
+ * @param h Heap from which to allocate
+ * @param align Alignment in bytes, must be a power of two
+ * @param bytes Number of bytes requested
+ * @return Pointer to memory the caller can now use
+ */
+void *sys_heap_aligned_alloc(struct sys_heap *h, size_t align, size_t bytes);
+
 /** @brief Free memory into a sys_heap
  *
  * De-allocates a pointer to memory previously returned from
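Since the align parameter must be a power of two, callers that compute alignments at runtime may want the usual bit-trick check (an illustrative helper, not part of this API; it is the same test the implementation asserts internally):

	/* Sketch, not from the commit: a nonzero value is a power of two
	 * exactly when it has a single bit set, i.e. (x & (x - 1)) == 0.
	 */
	static inline bool is_pow2(size_t x)
	{
		return x != 0 && (x & (x - 1)) == 0;
	}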
@@ -46,4 +46,14 @@ config SYS_HEAP_ALLOC_LOOPS
 	  keeps the maximum runtime at a tight bound so that the heap
 	  is useful in locked or ISR contexts.
 
+config SYS_HEAP_ALIGNED_ALLOC
+	bool "Enable sys_heap_aligned_alloc() API"
+	help
+	  When true, the sys_heap_aligned_alloc() API is available to
+	  guarantee alignment of returned heap blocks in an efficient
+	  way.  For technical reasons, this requires the use of the
+	  "big" 8 byte heap block header format, so it will moderately
+	  increase heap memory overhead on 32 bit platforms when using
+	  small (<256kb) heaps.
+
 endmenu
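Enabling the option from an application is the usual one-line Kconfig fragment (the prj.conf file name is the standard Zephyr convention, not part of this diff):

	# prj.conf
	CONFIG_SYS_HEAP_ALIGNED_ALLOC=y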
@@ -21,9 +21,19 @@ static void *chunk_mem(struct z_heap *h, chunkid_t c)
 	return ret;
 }
 
+static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
+{
+	return (IS_ENABLED(CONFIG_SYS_HEAP_ALIGNED_ALLOC)
+		&& chunk_size(h, c) == 1);
+}
+
 static void free_list_remove(struct z_heap *h, int bidx,
 			     chunkid_t c)
 {
+	if (solo_free_header(h, c)) {
+		return;
+	}
+
 	struct z_heap_bucket *b = &h->buckets[bidx];
 
 	CHECK(!chunk_used(h, c));
@@ -46,6 +56,10 @@ static void free_list_remove(struct z_heap *h, int bidx,
 
 static void free_list_add(struct z_heap *h, chunkid_t c)
 {
+	if (solo_free_header(h, c)) {
+		return;
+	}
+
 	int bi = bucket_idx(h, chunk_size(h, c));
 
 	if (h->buckets[bi].next == 0) {
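The solo-free-header case exists because the alignment splits in the allocator below can leave a free chunk of exactly one CHUNK_UNIT; a sketch of the reasoning (the field layout shown is illustrative, assuming the 8-byte "big" header and CHUNK_UNIT == 8):

	/* Illustrative layout, assuming big headers and CHUNK_UNIT == 8:
	 *
	 *   unit 0 of a free chunk: size/used word + left-size word (8 bytes)
	 *   unit 1 of a free chunk: free-list prev/next links (8 bytes)
	 *
	 * A chunk of size 1 has only unit 0, so there is nowhere to store
	 * the list links.  solo_free_header() detects that case and the
	 * free list add/remove paths skip such chunks; they are recovered
	 * later when coalescing merges them into a neighboring chunk.
	 */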
@@ -231,6 +245,54 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
 	return chunk_mem(heap->heap, c);
 }
 
+void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
+{
+	struct z_heap *h = heap->heap;
+
+	CHECK((align & (align - 1)) == 0);
+	CHECK(big_heap(h));
+	if (bytes == 0) {
+		return NULL;
+	}
+
+	/* Find a free block that is guaranteed to fit */
+	size_t chunksz = bytes_to_chunksz(h, bytes);
+	size_t mask = (align / CHUNK_UNIT) - 1;
+	size_t padsz = MAX(CHUNK_UNIT, chunksz + mask);
+	chunkid_t c0 = alloc_chunks(h, padsz);
+
+	if (c0 == 0) {
+		return NULL;
+	}
+
+	/* Align within memory, using "chunk index" units.  Remember
+	 * the block we're aligning starts in the chunk AFTER the
+	 * header!
+	 */
+	size_t c0i = ((size_t) &chunk_buf(h)[c0 + 1]) / CHUNK_UNIT;
+	size_t ci = ((c0i + mask) & ~mask);
+	chunkid_t c = c0 + (ci - c0i);
+
+	CHECK(c >= c0 && c < c0 + padsz);
+	CHECK((((size_t) chunk_mem(h, c)) & (align - 1)) == 0);
+
+	/* Split and free unused prefix */
+	if (c > c0) {
+		split_chunks(h, c0, c);
+		set_chunk_used(h, c, true);
+		free_chunks(h, c0);
+	}
+
+	/* Split and free unused suffix */
+	if (chunksz < chunk_size(h, c)) {
+		split_chunks(h, c, c + chunksz);
+		set_chunk_used(h, c, true);
+		free_chunks(h, c + chunksz);
+	}
+
+	return chunk_mem(h, c);
+}
+
 void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 {
 	/* Must fit in a 32 bit count of CHUNK_UNIT */
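A worked instance of the padding arithmetic above (the numbers are illustrative; CHUNK_UNIT is the heap's 8-byte granularity, and the code works in chunk-index units after translating addresses):

	/* Illustrative numbers: align = 64 bytes, CHUNK_UNIT = 8, so aligned
	 * payloads occur every 64/8 = 8 chunks and mask = 7.  Suppose the
	 * request needs chunksz = 4 chunks.
	 *
	 *   padsz = chunksz + mask = 11 chunks
	 *
	 * Wherever alloc_chunks() places those 11 chunks, an aligned index
	 * ci with (ci & mask) == 0 occurs within the first mask positions,
	 * leaving at least chunksz chunks from ci to the end of the run.
	 * Rounding up is the standard idiom:
	 *
	 *   ci = (c0i + mask) & ~mask;
	 *
	 * The at-most-mask leading chunks and any trailing chunks are then
	 * split off and freed back to the heap.
	 */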
@@ -66,7 +66,8 @@ struct z_heap {
 
 static inline bool big_heap_chunks(size_t chunks)
 {
-	return sizeof(void *) > 4 || chunks > 0x7fff;
+	return IS_ENABLED(CONFIG_SYS_HEAP_ALIGNED_ALLOC)
+		|| sizeof(void *) > 4 || chunks > 0x7fff;
 }
 
 static inline bool big_heap_bytes(size_t bytes)
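Why the feature forces the "big" header format (a hedged reading, consistent with the Kconfig help above rather than stated in the diff):

	/* Sketch of the address math, assuming CHUNK_UNIT == 8:
	 *
	 *   big header (8 bytes):   payload(c) = base + 8*(c + 1)  ->  8-aligned
	 *   small header (4 bytes): payload(c) = base + 8*c + 4    ->  never 8-aligned
	 *
	 * With big headers a payload lands exactly on the next chunk
	 * boundary, so any power-of-two alignment can be produced just by
	 * choosing the right chunk index; the small header offsets every
	 * payload 4 bytes off the chunk grid.  Hence the change above makes
	 * big_heap_chunks() unconditionally true when
	 * CONFIG_SYS_HEAP_ALIGNED_ALLOC is enabled.
	 */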