sys_heap: optimize struct z_heap
It is possible to remove a few fields from struct z_heap, removing some
runtime indirections by doing so:

- The buf pointer is actually the same as the struct z_heap pointer
  itself. So let's simply create chunk_buf() that performs a type
  conversion. That type is also chunk_unit_t now rather than u64_t so it
  can be defined based on CHUNK_UNIT.

- Replace the struct z_heap_bucket pointer by a zero-sized array at the
  end of struct z_heap.

- Make chunk #0 into an actual chunk with its own header. This allows
  for removing the chunk0 field and streamlining the code. This way
  h->chunk0 becomes right_chunk(h, 0). This sets the table for further
  simplifications to come.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
parent e2b64777e5
commit e553161b8e
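To make the new layout concrete, here is a minimal self-contained sketch distilled from the diff below. The names and field layout mirror the patch; the C99 buckets[] spelling (for the patch's buckets[0]) is an illustrative choice:

#include <stddef.h>
#include <stdint.h>

#define CHUNK_UNIT 8

typedef size_t chunkid_t;

/* One chunk unit: chunk ids index the buffer in CHUNK_UNIT-byte steps. */
typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;

struct z_heap_bucket {
	chunkid_t next;
	size_t list_size;
};

struct z_heap {
	uint64_t chunk0_hdr_area;	/* room for chunk #0's own header */
	uint32_t len;
	uint32_t avail_buckets;
	struct z_heap_bucket buckets[];	/* trails the header in chunk #0 */
};

/* Replaces the stored buf pointer: the header itself is the buffer start. */
static inline chunk_unit_t *chunk_buf(struct z_heap *h)
{
	return (chunk_unit_t *)h;
}

Because the header doubles as chunk #0, all heap metadata is now reachable from the struct z_heap pointer alone, with no buf or buckets pointer dereferences.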
@@ -24,7 +24,7 @@ static size_t max_chunkid(struct z_heap *h)
 
 static bool in_bounds(struct z_heap *h, chunkid_t c)
 {
-	return (c >= h->chunk0)
+	return (c >= right_chunk(h, 0))
 		&& (c <= max_chunkid(h))
 		&& (chunk_size(h, c) < h->len);
 }
@@ -34,7 +34,7 @@ static bool valid_chunk(struct z_heap *h, chunkid_t c)
 	return (chunk_size(h, c) > 0
 		&& (c + chunk_size(h, c) <= h->len)
 		&& in_bounds(h, c)
-		&& ((c == h->chunk0) || in_bounds(h, left_chunk(h, c)))
+		&& (!left_chunk(h, c) || in_bounds(h, left_chunk(h, c)))
 		&& (chunk_used(h, c) || in_bounds(h, prev_free_chunk(h, c)))
 		&& (chunk_used(h, c) || in_bounds(h, next_free_chunk(h, c))));
 }
@@ -114,18 +114,15 @@ bool sys_heap_validate(struct sys_heap *heap)
 	 */
 	chunkid_t prev_chunk = 0;
 
-	for (c = h->chunk0; c <= max_chunkid(h); c = right_chunk(h, c)) {
+	for (c = right_chunk(h, 0); c <= max_chunkid(h); c = right_chunk(h, c)) {
 		if (!valid_chunk(h, c)) {
 			return false;
 		}
 		if (!chunk_used(h, c)) {
 			return false;
 		}
 
-		if (c != h->chunk0) {
-			if (left_chunk(h, c) != prev_chunk) {
-				return false;
-			}
+		if (left_chunk(h, c) != prev_chunk) {
+			return false;
 		}
 		prev_chunk = c;
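Dropping the old c != h->chunk0 special case is safe here: the walk now starts at right_chunk(h, 0), whose left neighbour is chunk #0 at id 0, and sys_heap_init() records that adjacency via set_left_chunk_size(), so left_chunk(h, c) on the first iteration equals prev_chunk's initial value of 0.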
@@ -158,7 +155,7 @@ bool sys_heap_validate(struct sys_heap *heap)
 	/* Now we are valid, but have managed to invert all the in-use
 	 * fields. One more linear pass to fix them up
 	 */
-	for (c = h->chunk0; c <= max_chunkid(h); c = right_chunk(h, c)) {
+	for (c = right_chunk(h, 0); c <= max_chunkid(h); c = right_chunk(h, c)) {
 		set_chunk_used(h, c, !chunk_used(h, c));
 	}
 	return true;
@@ -9,7 +9,8 @@
 
 static void *chunk_mem(struct z_heap *h, chunkid_t c)
 {
-	uint8_t *ret = ((uint8_t *)&h->buf[c]) + chunk_header_bytes(h);
+	chunk_unit_t *buf = chunk_buf(h);
+	uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);
 
 	CHECK(!(((size_t)ret) & (big_heap(h) ? 7 : 3)));
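Reusing the sketch above, the two address conversions in this hunk and in sys_heap_free() below come down to the following pair (hypothetical helper names; chunk_header_bytes() is reduced to a plain hdr_bytes parameter here, while the real one depends on big_heap(h)):

/* chunk id -> user pointer, as in chunk_mem(): step c units into the
 * buffer, then skip over the chunk header */
static inline void *mem_from_chunkid(struct z_heap *h, chunkid_t c,
				     size_t hdr_bytes)
{
	return (uint8_t *)&chunk_buf(h)[c] + hdr_bytes;
}

/* user pointer -> chunk id, as in sys_heap_free(): the exact inverse */
static inline chunkid_t chunkid_from_mem(struct z_heap *h, void *mem,
					 size_t hdr_bytes)
{
	return ((uint8_t *)mem - hdr_bytes
		- (uint8_t *)chunk_buf(h)) / CHUNK_UNIT;
}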
@@ -117,7 +118,7 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 
 	struct z_heap *h = heap->heap;
 	chunkid_t c = ((uint8_t *)mem - chunk_header_bytes(h)
-		       - (uint8_t *)h->buf) / CHUNK_UNIT;
+		       - (uint8_t *)chunk_buf(h)) / CHUNK_UNIT;
 
 	/* Merge with right chunk? We can just absorb it. */
 	if (!last_chunk(h, c) && !chunk_used(h, right_chunk(h, c))) {
@@ -132,7 +133,7 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 	}
 
 	/* Merge with left chunk? It absorbs us. */
-	if (c != h->chunk0 && !chunk_used(h, left_chunk(h, c))) {
+	if (!chunk_used(h, left_chunk(h, c))) {
 		chunkid_t lc = left_chunk(h, c);
 		chunkid_t rc = right_chunk(h, c);
 		size_t csz = chunk_size(h, c);
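The removed c != h->chunk0 test is no longer needed: sys_heap_init() below marks chunk #0 as used, so chunk_used(h, left_chunk(h, c)) is true for the first real chunk and the left merge is naturally skipped, with no risk of absorbing the header chunk.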
@@ -202,37 +203,38 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
 
 void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 {
-	/* Must fit in a 32 bit count of u64's */
-#if __SIZEOF_SIZE_T__ > 4
-	CHECK(bytes < 0x800000000ULL);
-#endif
+	/* Must fit in a 32 bit count of CHUNK_UNIT */
+	CHECK(bytes / CHUNK_UNIT <= 0xffffffffU);
 
 	/* Round the start up, the end down */
-	size_t addr = ((size_t)mem + CHUNK_UNIT - 1) & ~(CHUNK_UNIT - 1);
-	size_t end = ((size_t)mem + bytes) & ~(CHUNK_UNIT - 1);
+	uintptr_t addr = ROUND_UP(mem, CHUNK_UNIT);
+	uintptr_t end = ROUND_DOWN((uint8_t *)mem + bytes, CHUNK_UNIT);
 	size_t buf_sz = (end - addr) / CHUNK_UNIT;
-	size_t hdr_chunks = chunksz(sizeof(struct z_heap));
 
 	CHECK(end > addr);
-	CHECK(buf_sz > chunksz(sizeof(struct z_heap)));
 
 	struct z_heap *h = (struct z_heap *)addr;
-
-	heap->heap = (struct z_heap *)addr;
-	h->buf = (uint64_t *)addr;
-	h->buckets = (void *)(addr + CHUNK_UNIT * hdr_chunks);
+	heap->heap = h;
+	h->chunk0_hdr_area = 0;
 	h->len = buf_sz;
 	h->avail_buckets = 0;
 
-	size_t buckets_bytes = ((bucket_idx(h, buf_sz) + 1)
-				* sizeof(struct z_heap_bucket));
-
-	h->chunk0 = hdr_chunks + chunksz(buckets_bytes);
+	int nb_buckets = bucket_idx(h, buf_sz) + 1;
+	size_t chunk0_size = chunksz(sizeof(struct z_heap) +
+				     nb_buckets * sizeof(struct z_heap_bucket));
+
+	CHECK(chunk0_size < buf_sz);
 
-	for (int i = 0; i <= bucket_idx(heap->heap, heap->heap->len); i++) {
-		heap->heap->buckets[i].list_size = 0;
-		heap->heap->buckets[i].next = 0;
+	for (int i = 0; i < nb_buckets; i++) {
+		h->buckets[i].list_size = 0;
+		h->buckets[i].next = 0;
 	}
 
-	set_chunk_size(h, h->chunk0, buf_sz - h->chunk0);
-	free_list_add(h, h->chunk0);
+	set_chunk_size(h, 0, chunk0_size);
+	set_chunk_used(h, 0, true);
+
+	set_chunk_size(h, chunk0_size, buf_sz - chunk0_size);
+	set_left_chunk_size(h, chunk0_size, chunk0_size);
+	free_list_add(h, chunk0_size);
 }
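As a worked example of the new initialization math (all concrete numbers are assumed for illustration; real values depend on the target and on bucket_idx()):

#include <stddef.h>
#include <stdio.h>

#define CHUNK_UNIT 8

/* mirrors chunksz(): bytes rounded up to whole chunk units */
static size_t chunksz(size_t bytes)
{
	return (bytes + CHUNK_UNIT - 1) / CHUNK_UNIT;
}

int main(void)
{
	/* Suppose sizeof(struct z_heap) == 16 and nb_buckets == 4 with
	 * 8-byte buckets: chunk0_size = chunksz(16 + 4 * 8) = 6 units.
	 */
	size_t chunk0_size = chunksz(16 + 4 * 8);

	/* Chunk #0 then spans ids [0, 6) and is marked used; the first
	 * free chunk starts at id 6 and covers the rest of the buffer.
	 */
	printf("chunk0_size = %zu units\n", chunk0_size);
	return 0;
}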
@@ -49,30 +49,38 @@ typedef size_t chunkid_t;
 
 #define CHUNK_UNIT 8
 
-enum chunk_fields { SIZE_AND_USED, LEFT_SIZE, FREE_PREV, FREE_NEXT };
+typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;
 
-struct z_heap {
-	uint64_t *buf;
-	struct z_heap_bucket *buckets;
-	uint32_t len;
-	uint32_t chunk0;
-	uint32_t avail_buckets;
-};
+enum chunk_fields { SIZE_AND_USED, LEFT_SIZE, FREE_PREV, FREE_NEXT };
 
 struct z_heap_bucket {
	chunkid_t next;
	size_t list_size;
 };
 
+struct z_heap {
+	uint64_t chunk0_hdr_area;	/* matches the largest header */
+	uint32_t len;
+	uint32_t avail_buckets;
+	struct z_heap_bucket buckets[0];
+};
+
 static inline bool big_heap(struct z_heap *h)
 {
	return sizeof(size_t) > 4 || h->len > 0x7fff;
 }
 
+static inline chunk_unit_t *chunk_buf(struct z_heap *h)
+{
+	/* the struct z_heap matches with the first chunk */
+	return (chunk_unit_t *)h;
+}
+
 static inline size_t chunk_field(struct z_heap *h, chunkid_t c,
				 enum chunk_fields f)
 {
-	void *cmem = &h->buf[c];
+	chunk_unit_t *buf = chunk_buf(h);
+	void *cmem = &buf[c];
 
	if (big_heap(h)) {
		return ((uint32_t *)cmem)[f];
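A subtlety worth spelling out: since struct z_heap doubles as chunk #0, chunk0_hdr_area must sit at offset 0 and be large enough for the biggest chunk header, i.e. two 32-bit fields in the big_heap() case. A compile-time restatement of that invariant over a hypothetical mirror of the struct:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct z_heap_sketch {			/* mirrors the diff's struct z_heap */
	uint64_t chunk0_hdr_area;
	uint32_t len;
	uint32_t avail_buckets;
};

/* Chunk #0 writes its SIZE_AND_USED and LEFT_SIZE words at the start of
 * the buffer, which is also the start of the struct. */
static_assert(offsetof(struct z_heap_sketch, chunk0_hdr_area) == 0,
	      "chunk #0's header must alias the start of struct z_heap");
static_assert(2 * sizeof(uint32_t) <= sizeof(uint64_t),
	      "chunk0_hdr_area must hold the largest header");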
@@ -84,9 +92,10 @@ static inline size_t chunk_field(struct z_heap *h, chunkid_t c,
 static inline void chunk_set(struct z_heap *h, chunkid_t c,
			     enum chunk_fields f, chunkid_t val)
 {
-	CHECK(c >= h->chunk0 && c < h->len);
+	CHECK(c < h->len);
 
-	void *cmem = &h->buf[c];
+	chunk_unit_t *buf = chunk_buf(h);
+	void *cmem = &buf[c];
 
	if (big_heap(h)) {
		CHECK(val == (uint32_t)val);
@@ -109,7 +118,8 @@ static inline size_t chunk_size(struct z_heap *h, chunkid_t c)
 
 static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
 {
-	void *cmem = &h->buf[c];
+	chunk_unit_t *buf = chunk_buf(h);
+	void *cmem = &buf[c];
 
	if (big_heap(h)) {
		if (used) {
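For illustration, a standalone toy showing what these accessors amount to on a big heap (the type and enum names mirror the diff; the buffer and values are invented, and the real heap additionally guarantees CHUNK_UNIT alignment):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_UNIT 8
typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;
enum chunk_fields { SIZE_AND_USED, LEFT_SIZE, FREE_PREV, FREE_NEXT };

int main(void)
{
	chunk_unit_t buf[4] = {0};	/* stand-in for chunk_buf(h) */
	size_t c = 1;			/* some chunk id */

	/* chunk_set(h, c, f, val) on a big heap stores 32-bit words
	 * indexed by enum chunk_fields at the chunk's base address: */
	uint32_t *cmem = (uint32_t *)&buf[c];
	cmem[SIZE_AND_USED] = 3;
	cmem[LEFT_SIZE] = 1;

	/* chunk_field(h, c, f) reads them back the same way: */
	printf("left size = %u\n", (unsigned)cmem[LEFT_SIZE]);
	return 0;
}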