lib/os/heap: General refactoring
Miscellaneous refactoring and simplification.  No behavioral changes:

Make split_alloc() take and return chunk IDs and not memory pointers,
leaving the conversion between memory and chunks to the higher-level
sys_heap_alloc() API.  This cleans up the internals for code that
wants to do allocation but has its own ideas about what to do with
the resulting chunks.

Add split_chunks() and merge_chunks() utilities to own the
linear/size pointers, and have split_alloc() and free_chunks() use
them instead of doing the list management directly.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 48f43b57f5
commit 1f29dd3251

 lib/os/heap.c | 127 lines changed
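As a reading aid (not part of the commit or of heap.c): the sketch below shows how the chunk-level helpers added in the diff below compose when used directly. It assumes a chunk c spanning at least two chunk units that the caller has already taken off the free list, the way split_alloc() does; the function name split_merge_roundtrip() is invented for illustration. Note that split_chunks() and merge_chunks() deliberately leave free-list membership to the caller, so real users pair them with free_list_remove()/free_list_add() exactly as the diff does.

/* Hypothetical example, not in heap.c: split a chunk in half and
 * immediately merge it back, exercising only the helpers introduced
 * by this commit.  "c" must span at least two chunk units and must
 * not currently sit on a free list.
 */
static void split_merge_roundtrip(struct z_heap *h, chunkid_t c)
{
	size_t sz0 = chunk_size(h, c);
	chunkid_t mid = c + sz0 / 2;

	/* After the split, c spans [c, mid) and mid spans [mid, c + sz0) */
	split_chunks(h, c, mid);
	CHECK(chunk_size(h, c) + chunk_size(h, mid) == sz0);

	/* Merging re-absorbs mid and restores the neighbor linkage */
	merge_chunks(h, c, mid);
	CHECK(chunk_size(h, c) == sz0);
	CHECK(left_chunk(h, right_chunk(h, c)) == c);
}

This size/left-size round trip is the bookkeeping that split_alloc() and free_chunks() now delegate to the helpers instead of updating the header fields by hand.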
@@ -9,6 +9,10 @@
 static void *chunk_mem(struct z_heap *h, chunkid_t c)
 {
+	if (c == 0) {
+		return NULL;
+	}
+
 	chunk_unit_t *buf = chunk_buf(h);
 	uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);
 
@@ -66,10 +70,37 @@ static void free_list_add(struct z_heap *h, chunkid_t c)
 	}
 }
 
+/* Splits a chunk "lc" into a left chunk and a right chunk at "rc".
+ * Leaves both chunks marked "free"
+ */
+static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
+{
+	CHECK(rc > lc);
+	CHECK(rc - lc < chunk_size(h, lc));
+
+	size_t sz0 = chunk_size(h, lc);
+	size_t lsz = rc - lc;
+	size_t rsz = sz0 - lsz;
+
+	set_chunk_size(h, lc, lsz);
+	set_chunk_size(h, rc, rsz);
+	set_left_chunk_size(h, rc, lsz);
+	set_left_chunk_size(h, right_chunk(h, rc), rsz);
+}
+
+/* Does not modify free list */
+static void merge_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
+{
+	size_t newsz = chunk_size(h, lc) + chunk_size(h, rc);
+
+	set_chunk_size(h, lc, newsz);
+	set_left_chunk_size(h, right_chunk(h, rc), newsz);
+}
+
 /* Allocates (fit check has already been performed) from the next
  * chunk at the specified bucket level
  */
-static void *split_alloc(struct z_heap *h, int bidx, size_t sz)
+static chunkid_t split_alloc(struct z_heap *h, int bidx, size_t sz)
 {
 	CHECK(h->buckets[bidx].next != 0
 	      && sz <= chunk_size(h, h->buckets[bidx].next));
@@ -79,24 +110,37 @@ static void *split_alloc(struct z_heap *h, int bidx, size_t sz)
 	free_list_remove(h, bidx, c);
 
 	/* Split off remainder if it's usefully large */
-	size_t rem = chunk_size(h, c) - sz;
-
-	CHECK(rem < h->len);
-
-	if (rem >= min_chunk_size(h)) {
-		chunkid_t c2 = c + sz;
-		chunkid_t c3 = right_chunk(h, c);
-
-		set_chunk_size(h, c, sz);
-		set_chunk_size(h, c2, rem);
-		set_left_chunk_size(h, c2, sz);
-		set_left_chunk_size(h, c3, rem);
-		free_list_add(h, c2);
+	if ((chunk_size(h, c) - sz) >= (big_heap(h) ? 2 : 1)) {
+		split_chunks(h, c, c + sz);
+		free_list_add(h, c + sz);
 	}
 
 	set_chunk_used(h, c, true);
-
-	return chunk_mem(h, c);
+	return c;
+}
+
+static void free_chunks(struct z_heap *h, chunkid_t c)
+{
+	set_chunk_used(h, c, false);
+
+	/* Merge with free right chunk? */
+	if (!chunk_used(h, right_chunk(h, c))) {
+		int bi = bucket_idx(h, chunk_size(h, right_chunk(h, c)));
+
+		free_list_remove(h, bi, right_chunk(h, c));
+		merge_chunks(h, c, right_chunk(h, c));
+	}
+
+	/* Merge with free left chunk? */
+	if (!chunk_used(h, left_chunk(h, c))) {
+		int bi = bucket_idx(h, chunk_size(h, left_chunk(h, c)));
+
+		free_list_remove(h, bi, left_chunk(h, c));
+		merge_chunks(h, left_chunk(h, c), c);
+		c = left_chunk(h, c);
+	}
+
+	free_list_add(h, c);
 }
 
 void sys_heap_free(struct sys_heap *heap, void *mem)
@@ -104,7 +148,6 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 	if (mem == NULL) {
 		return; /* ISO C free() semantics */
 	}
-
 	struct z_heap *h = heap->heap;
 	chunkid_t c = ((uint8_t *)mem - chunk_header_bytes(h)
 		       - (uint8_t *)chunk_buf(h)) / CHUNK_UNIT;
@@ -115,51 +158,26 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 	 */
 	__ASSERT(chunk_used(h, c),
		 "unexpected heap state (double-free?) for memory at %p", mem);
 
 	/*
 	 * It is easy to catch many common memory overflow cases with
 	 * a quick check on this and next chunk header fields that are
 	 * immediately before and after the freed memory.
 	 */
 	__ASSERT(left_chunk(h, right_chunk(h, c)) == c,
-		 "corrupted heap bounds (buffer overflow?) for memory at %p", mem);
+		 "corrupted heap bounds (buffer overflow?) for memory at %p",
+		 mem);
 
-	/* Merge with right chunk? We can just absorb it. */
-	if (!chunk_used(h, right_chunk(h, c))) {
-		chunkid_t rc = right_chunk(h, c);
-		size_t newsz = chunk_size(h, c) + chunk_size(h, rc);
-
-		free_list_remove(h, bucket_idx(h, chunk_size(h, rc)), rc);
-		set_chunk_size(h, c, newsz);
-		set_left_chunk_size(h, right_chunk(h, c), newsz);
-	}
-
-	/* Merge with left chunk? It absorbs us. */
-	if (!chunk_used(h, left_chunk(h, c))) {
-		chunkid_t lc = left_chunk(h, c);
-		chunkid_t rc = right_chunk(h, c);
-		size_t csz = chunk_size(h, c);
-		size_t merged_sz = csz + chunk_size(h, lc);
-
-		free_list_remove(h, bucket_idx(h, chunk_size(h, lc)), lc);
-		set_chunk_size(h, lc, merged_sz);
-		set_left_chunk_size(h, rc, merged_sz);
-
-		c = lc;
-	}
-
-	set_chunk_used(h, c, false);
-	free_list_add(h, c);
+	free_chunks(h, c);
 }
 
-void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
+static chunkid_t alloc_chunks(struct z_heap *h, size_t sz)
 {
-	struct z_heap *h = heap->heap;
-	size_t sz = bytes_to_chunksz(h, bytes);
 	int bi = bucket_idx(h, sz);
 	struct z_heap_bucket *b = &h->buckets[bi];
 
-	if (bytes == 0 || bi > bucket_idx(h, h->len)) {
-		return NULL;
+	if (bi > bucket_idx(h, h->len)) {
+		return 0;
 	}
 
 	/* First try a bounded count of items from the minimal bucket
@@ -199,7 +217,18 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
 		return split_alloc(h, minbucket, sz);
 	}
 
-	return NULL;
+	return 0;
 }
 
+void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
+{
+	if (bytes == 0) {
+		return NULL;
+	}
+	size_t chunksz = bytes_to_chunksz(heap->heap, bytes);
+	chunkid_t c = alloc_chunks(heap->heap, chunksz);
+
+	return chunk_mem(heap->heap, c);
+}
+
 void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)