sys_heap: provide more chunk_field accessors

Let's provide accessors for getting and setting every field, so that the
chunk header layout is abstracted away from the main code. Those are:

SIZE_AND_USED: chunk_used(), chunk_size(), set_chunk_used() and
set_chunk_size().

LEFT_SIZE: left_chunk() and set_left_chunk_size().

FREE_PREV: prev_free_chunk() and set_prev_free_chunk().

FREE_NEXT: next_free_chunk() and set_next_free_chunk().

For consistency, the former chunk_set_used() is renamed to set_chunk_used().

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
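
As a minimal standalone sketch of the pattern (the mask values, header
array, and function bodies below are simplified stand-ins, not the actual
z_heap layout): each chunk packs its size and a used flag into one header
word, and paired get/set helpers keep that packing out of the main code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SIZE_MASK 0x7fffffffu	/* low 31 bits: chunk size (hypothetical) */
#define USED_BIT  0x80000000u	/* top bit: allocated flag (hypothetical) */

static uint32_t header[16];	/* one packed word per chunk */

static size_t chunk_size(size_t c) { return header[c] & SIZE_MASK; }
static bool chunk_used(size_t c) { return (header[c] & USED_BIT) != 0; }

static void set_chunk_size(size_t c, size_t size)
{
	/* Update the size field, preserving the used flag. */
	header[c] = (uint32_t)size | (header[c] & USED_BIT);
}

static void set_chunk_used(size_t c, bool used)
{
	/* Update the used flag, preserving the size field. */
	header[c] = (header[c] & SIZE_MASK) | (used ? USED_BIT : 0);
}

int main(void)
{
	set_chunk_size(0, 12);
	set_chunk_used(0, true);
	set_chunk_size(0, 8);	/* resizing keeps the used flag intact */
	printf("size=%zu used=%d\n", chunk_size(0), (int)chunk_used(0));
	return 0;	/* prints: size=8 used=1 */
}
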
Author: Nicolas Pitre <npitre@baylibre.com>
Date: 2019-09-24 23:20:53 -04:00
Committed by: Carles Cufí
Parent: f97eca26e6
Commit: 54950aca01
3 changed files with 48 additions and 25 deletions

lib/os/heap-validate.c

@@ -88,7 +88,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 			if (!valid_chunk(h, c)) {
 				return false;
 			}
-			chunk_set_used(h, c, true);
+			set_chunk_used(h, c, true);
 		}
 		bool empty = (h->avail_buckets & (1 << b)) == 0;
@@ -129,7 +129,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 		}
 		prev_chunk = c;
-		chunk_set_used(h, c, false);
+		set_chunk_used(h, c, false);
 	}
 	if (c != h->len) {
 		return false;	/* Should have exactly consumed the buffer */
@@ -151,7 +151,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 			if (chunk_used(h, c)) {
 				return false;
 			}
-			chunk_set_used(h, c, true);
+			set_chunk_used(h, c, true);
 		}
 	}
@@ -159,7 +159,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 	 * fields.  One more linear pass to fix them up
 	 */
 	for (c = h->chunk0; c <= max_chunkid(h); c = right_chunk(h, c)) {
-		chunk_set_used(h, c, !chunk_used(h, c));
+		set_chunk_used(h, c, !chunk_used(h, c));
 	}
 	return true;
 }
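
These hunks are all instances of one trick: sys_heap_validate() has no
scratch storage, so it borrows the used bit as a "visited" marker and
arranges its passes so that a single final flip restores every chunk. A
standalone sketch of that invariant (plain bool arrays and a fixed chunk
count, hypothetical states, not the real code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define NCHUNKS 4
static bool used[NCHUNKS] = { false, true, false, true };
static const bool is_free[NCHUNKS] = { true, false, true, false };

int main(void)
{
	size_t c;

	/* Free-list walk: each free chunk must be unmarked; mark it.
	 * Finding it already marked would mean a cycle or a chunk
	 * reachable from two lists.
	 */
	for (c = 0; c < NCHUNKS; c++) {
		if (is_free[c]) {
			assert(!used[c]);
			used[c] = true;	/* set_chunk_used(h, c, true) */
		}
	}
	/* Linear walk over all chunks: validate, then clear the bit. */
	for (c = 0; c < NCHUNKS; c++) {
		used[c] = false;	/* set_chunk_used(h, c, false) */
	}
	/* Second free-list walk: mark free chunks again. Every bit is
	 * now the inverse of its true value...
	 */
	for (c = 0; c < NCHUNKS; c++) {
		if (is_free[c]) {
			assert(!used[c]);
			used[c] = true;
		}
	}
	/* ...so one flip per chunk restores the original state. */
	for (c = 0; c < NCHUNKS; c++) {
		used[c] = !used[c];
	}
	assert(!used[0] && used[1] && !used[2] && used[3]);
	return 0;
}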

lib/os/heap.c

@@ -37,8 +37,8 @@ static void free_list_remove(struct z_heap *h, int bidx,
 			  second = next_free_chunk(h, c);
 		b->next = second;
-		chunk_set(h, first, FREE_NEXT, second);
-		chunk_set(h, second, FREE_PREV, first);
+		set_next_free_chunk(h, first, second);
+		set_prev_free_chunk(h, second, first);
 	}
 }
@@ -53,17 +53,17 @@ static void free_list_add(struct z_heap *h, chunkid_t c)
 		/* Empty list, first item */
 		h->avail_buckets |= (1 << bi);
 		h->buckets[bi].next = c;
-		chunk_set(h, c, FREE_PREV, c);
-		chunk_set(h, c, FREE_NEXT, c);
+		set_prev_free_chunk(h, c, c);
+		set_next_free_chunk(h, c, c);
 	} else {
 		/* Insert before (!) the "next" pointer */
 		chunkid_t second = h->buckets[bi].next;
 		chunkid_t first = prev_free_chunk(h, second);
-		chunk_set(h, c, FREE_PREV, first);
-		chunk_set(h, c, FREE_NEXT, second);
-		chunk_set(h, first, FREE_NEXT, c);
-		chunk_set(h, second, FREE_PREV, c);
+		set_prev_free_chunk(h, c, first);
+		set_next_free_chunk(h, c, second);
+		set_next_free_chunk(h, first, c);
+		set_prev_free_chunk(h, second, c);
 	}
 	CHECK(h->avail_buckets & (1 << bucket_idx(h, chunk_size(h, c))));
@@ -95,16 +95,16 @@ static void *split_alloc(struct z_heap *h, int bidx, size_t sz)
 		chunkid_t c2 = c + sz;
 		chunkid_t c3 = right_chunk(h, c);
-		chunk_set(h, c, SIZE_AND_USED, sz);
-		chunk_set(h, c2, SIZE_AND_USED, rem);
-		chunk_set(h, c2, LEFT_SIZE, sz);
+		set_chunk_size(h, c, sz);
+		set_chunk_size(h, c2, rem);
+		set_left_chunk_size(h, c2, sz);
 		if (!last_chunk(h, c2)) {
-			chunk_set(h, c3, LEFT_SIZE, rem);
+			set_left_chunk_size(h, c3, rem);
 		}
 		free_list_add(h, c2);
 	}
-	chunk_set_used(h, c, true);
+	set_chunk_used(h, c, true);
 	return chunk_mem(h, c);
 }
@@ -125,9 +125,9 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 		size_t newsz = chunk_size(h, c) + chunk_size(h, rc);
 		free_list_remove(h, bucket_idx(h, chunk_size(h, rc)), rc);
-		chunk_set(h, c, SIZE_AND_USED, newsz);
+		set_chunk_size(h, c, newsz);
 		if (!last_chunk(h, c)) {
-			chunk_set(h, right_chunk(h, c), LEFT_SIZE, newsz);
+			set_left_chunk_size(h, right_chunk(h, c), newsz);
 		}
 	}
@@ -139,15 +139,15 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 		size_t merged_sz = csz + chunk_size(h, lc);
 		free_list_remove(h, bucket_idx(h, chunk_size(h, lc)), lc);
-		chunk_set(h, lc, SIZE_AND_USED, merged_sz);
+		set_chunk_size(h, lc, merged_sz);
 		if (!last_chunk(h, lc)) {
-			chunk_set(h, rc, LEFT_SIZE, merged_sz);
+			set_left_chunk_size(h, rc, merged_sz);
 		}
 		c = lc;
 	}
-	chunk_set_used(h, c, false);
+	set_chunk_used(h, c, false);
 	free_list_add(h, c);
 }
@@ -234,6 +234,6 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 		heap->heap->buckets[i].next = 0;
 	}
-	chunk_set(h, h->chunk0, SIZE_AND_USED, buf_sz - h->chunk0);
+	set_chunk_size(h, h->chunk0, buf_sz - h->chunk0);
 	free_list_add(h, h->chunk0);
 }
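
The free_list_add() change above is a conventional circular
doubly-linked-list insert, now spelled with the new setters. A standalone
sketch with plain index arrays, reducing the buckets to a single list
(all indices hypothetical, not the real chunk encoding):

#include <stdio.h>

#define NCHUNKS 8
static int prev[NCHUNKS], next[NCHUNKS];
static int bucket_next = -1;	/* -1 means the bucket list is empty */

static void free_list_add(int c)
{
	if (bucket_next < 0) {
		/* Empty list, first item: the chunk links to itself. */
		bucket_next = c;
		prev[c] = next[c] = c;	/* set_prev/next_free_chunk(c, c) */
	} else {
		/* Insert before (!) the bucket's "next" pointer. */
		int second = bucket_next;
		int first = prev[second];

		prev[c] = first;	/* set_prev_free_chunk(c, first) */
		next[c] = second;	/* set_next_free_chunk(c, second) */
		next[first] = c;	/* set_next_free_chunk(first, c) */
		prev[second] = c;	/* set_prev_free_chunk(second, c) */
	}
}

int main(void)
{
	free_list_add(3);
	free_list_add(5);	/* list is now 3 <-> 5, circular */
	printf("%d %d %d\n", bucket_next, next[3], next[5]);	/* 3 5 3 */
	return 0;
}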

lib/os/heap.h

@@ -108,13 +108,18 @@ static ALWAYS_INLINE size_t chunk_size(struct z_heap *h, chunkid_t c)
 	return chunk_field(h, c, SIZE_AND_USED) & h->size_mask;
 }
 
-static inline void chunk_set_used(struct z_heap *h, chunkid_t c,
-				  bool used)
+static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
 {
 	chunk_set(h, c, SIZE_AND_USED,
 		  chunk_size(h, c) | (used ? (h->size_mask + 1) : 0));
 }
 
+static inline void set_chunk_size(struct z_heap *h, chunkid_t c, size_t size)
+{
+	chunk_set(h, c, SIZE_AND_USED,
+		  size | (chunk_used(h, c) ? (h->size_mask + 1) : 0));
+}
+
 static inline chunkid_t prev_free_chunk(struct z_heap *h, chunkid_t c)
 {
 	return chunk_field(h, c, FREE_PREV);
@@ -125,6 +130,18 @@ static inline chunkid_t next_free_chunk(struct z_heap *h, chunkid_t c)
 	return chunk_field(h, c, FREE_NEXT);
 }
 
+static inline void set_prev_free_chunk(struct z_heap *h, chunkid_t c,
+				       chunkid_t prev)
+{
+	chunk_set(h, c, FREE_PREV, prev);
+}
+
+static inline void set_next_free_chunk(struct z_heap *h, chunkid_t c,
+				       chunkid_t next)
+{
+	chunk_set(h, c, FREE_NEXT, next);
+}
+
 static inline chunkid_t left_chunk(struct z_heap *h, chunkid_t c)
 {
 	return c - chunk_field(h, c, LEFT_SIZE);
@@ -135,6 +152,12 @@ static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
 	return c + chunk_size(h, c);
 }
 
+static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
+				       size_t size)
+{
+	chunk_set(h, c, LEFT_SIZE, size);
+}
+
 static inline size_t chunk_header_bytes(struct z_heap *h)
 {
 	return big_heap(h) ? 8 : 4;
 }
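
set_left_chunk_size() matters because neighbor navigation is pure
arithmetic: right_chunk() adds the chunk's own size and left_chunk()
subtracts the size recorded for the left neighbor, so split_alloc() and
sys_heap_free() must patch LEFT_SIZE whenever a chunk boundary moves. A
standalone sketch with plain size arrays (all values hypothetical):

#include <assert.h>
#include <stddef.h>

#define NCHUNKS 64
static size_t size[NCHUNKS], left_size[NCHUNKS];

static size_t right_chunk(size_t c) { return c + size[c]; }
static size_t left_chunk(size_t c) { return c - left_size[c]; }

int main(void)
{
	/* One 10-unit chunk at index 0, its right neighbor at 10. */
	size[0] = 10;
	size[10] = 6;
	left_size[10] = 10;

	/* Split chunk 0 into 4 + 6: the new middle chunk at index 4
	 * and the old right neighbor both need LEFT_SIZE updated,
	 * exactly as split_alloc() does above.
	 */
	size[0] = 4;
	size[4] = 6;
	left_size[4] = 4;	/* set_left_chunk_size(h, c2, sz) */
	left_size[10] = 6;	/* set_left_chunk_size(h, c3, rem) */

	assert(right_chunk(0) == 4);
	assert(right_chunk(4) == 10);
	assert(left_chunk(10) == 4);
	assert(left_chunk(4) == 0);
	return 0;
}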