sys_heap: remove need for last_chunk()

We already have chunk #0 containing our struct z_heap and marked as
used. We can add a partial chunk at the very end that is also marked
as used. By doing so, there is no longer any need to check heap
boundaries at run time when merging/splitting chunks, which means fewer
conditionals in the code's hot path.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Authored by Nicolas Pitre on 2019-09-26 01:59:35 -04:00; committed by Carles Cufí
parent 6d827fa080
commit d1125d21d4
4 changed files with 40 additions and 23 deletions
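
The idea in the commit message can be illustrated with a small standalone sketch (toy code with made-up names, not the Zephyr implementation): once both ends of the chunk array are occupied by permanently "used" chunks, a merge pass can look at a neighbour unconditionally, because a sentinel can never be merged with.

#include <stdbool.h>

/* Toy chunk header: size in chunk units (0 only for the end marker)
 * plus a used flag.  The real z_heap packs these fields differently.
 */
struct toy_chunk {
	unsigned size;
	bool used;
};

/* Try to absorb the right-hand neighbour of a just-freed chunk c.
 * Because the final chunk is permanently marked used, chunks[rc] is
 * always a valid header, so no "is there a right neighbour?" test is
 * needed -- that is the conditional this commit removes from the hot path.
 */
static void toy_merge_right(struct toy_chunk *chunks, unsigned c)
{
	unsigned rc = c + chunks[c].size;

	if (!chunks[rc].used) {
		chunks[c].size += chunks[rc].size;
	}
}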


@@ -36,17 +36,17 @@ struct k_mem_pool {
  * objects defined, and include extra so there's enough metadata space
  * available for the maximum number of minimum-sized objects to be
  * stored: 8 bytes for each desired chunk header, and a 24 word block
- * to reserve room for a "typical" set of bucket list heads (this size
- * was picked more to conform with existing test expectations than any
- * rigorous theory -- we have tests that rely on being able to
- * allocate the blocks promised and ones that make assumptions about
+ * to reserve room for a "typical" set of bucket list heads and one heap
+ * footer (this size was picked more to conform with existing test
+ * expectations than any rigorous theory -- we have tests that rely on being
+ * able to allocate the blocks promised and ones that make assumptions about
  * when memory will run out).
  */
 #define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
 	K_HEAP_DEFINE(poolheap_##name, \
 		      ((maxsz) * (nmax)) \
 		      + 8 * ((maxsz) * (nmax) / (minsz)) \
-		      + 24 * sizeof(void *)); \
+		      + 25 * sizeof(void *)); \
 	struct k_mem_pool name = { \
 		.heap = &poolheap_##name \
 	}
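
To make the updated reservation concrete, here is a hedged worked example with arbitrary parameters (minsz = 16, maxsz = 64, nmax = 4 on a 32-bit target; none of these values come from the tree):

/* Z_MEM_POOL_DEFINE(example, 16, 64, 4, 4) asks K_HEAP_DEFINE for:
 *
 *   (64 * 4)              = 256  payload for nmax maximum-size blocks
 *   + 8 * (64 * 4 / 16)   = 128  one 8-byte header per minimum-size chunk
 *   + 25 * sizeof(void *) = 100  bucket list heads plus the new heap footer
 *                         -----
 *                           484  bytes (the pre-patch figure was 480)
 */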


@@ -34,7 +34,8 @@ static bool valid_chunk(struct z_heap *h, chunkid_t c)
 	return (chunk_size(h, c) > 0
 		&& (c + chunk_size(h, c) <= h->len)
 		&& in_bounds(h, c)
-		&& (!left_chunk(h, c) || in_bounds(h, left_chunk(h, c)))
+		&& (right_chunk(h, left_chunk(h, c)) == c)
+		&& (left_chunk(h, right_chunk(h, c)) == c)
 		&& (chunk_used(h, c) || in_bounds(h, prev_free_chunk(h, c)))
 		&& (chunk_used(h, c) || in_bounds(h, next_free_chunk(h, c))));
 }
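
The two new assertions check that the implicit neighbour links are mutually consistent for every chunk, which only holds across the whole array once it is bracketed by the used header and footer chunks. A rough sketch of the adjacency relations involved (toy helpers, not the real accessors, which pack these fields into the chunk headers):

/* Each chunk records its own size and the size of its left neighbour,
 * so both neighbours are reachable in O(1) by pure index arithmetic.
 */
static unsigned toy_right_chunk(const unsigned *size, unsigned c)
{
	return c + size[c];        /* next header begins right after this chunk */
}

static unsigned toy_left_chunk(const unsigned *left_size, unsigned c)
{
	return c - left_size[c];   /* previous header is left_size[c] units back */
}

/* The checks above assert toy_right_chunk(toy_left_chunk(c)) == c and
 * toy_left_chunk(toy_right_chunk(c)) == c, including when the walk
 * lands on chunk 0 or on the end marker at index h->len.
 */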


@@ -70,11 +70,6 @@ static void free_list_add(struct z_heap *h, chunkid_t c)
 	CHECK(h->avail_buckets & (1 << bucket_idx(h, chunk_size(h, c))));
 }

-static ALWAYS_INLINE bool last_chunk(struct z_heap *h, chunkid_t c)
-{
-	return (c + chunk_size(h, c)) == h->len;
-}
-
 /* Allocates (fit check has already been perfomred) from the next
  * chunk at the specified bucket level
  */
@@ -99,9 +94,7 @@ static void *split_alloc(struct z_heap *h, int bidx, size_t sz)
 		set_chunk_size(h, c, sz);
 		set_chunk_size(h, c2, rem);
 		set_left_chunk_size(h, c2, sz);
-		if (!last_chunk(h, c2)) {
-			set_left_chunk_size(h, c3, rem);
-		}
+		set_left_chunk_size(h, c3, rem);
 		free_list_add(h, c2);
 	}
@@ -121,15 +114,13 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 		     - (uint8_t *)chunk_buf(h)) / CHUNK_UNIT;

 	/* Merge with right chunk? We can just absorb it. */
-	if (!last_chunk(h, c) && !chunk_used(h, right_chunk(h, c))) {
+	if (!chunk_used(h, right_chunk(h, c))) {
 		chunkid_t rc = right_chunk(h, c);
 		size_t newsz = chunk_size(h, c) + chunk_size(h, rc);

 		free_list_remove(h, bucket_idx(h, chunk_size(h, rc)), rc);
 		set_chunk_size(h, c, newsz);
-		if (!last_chunk(h, c)) {
-			set_left_chunk_size(h, right_chunk(h, c), newsz);
-		}
+		set_left_chunk_size(h, right_chunk(h, c), newsz);
 	}

 	/* Merge with left chunk? It absorbs us. */
@@ -141,9 +132,7 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
 		free_list_remove(h, bucket_idx(h, chunk_size(h, lc)), lc);
 		set_chunk_size(h, lc, merged_sz);
-		if (!last_chunk(h, lc)) {
-			set_left_chunk_size(h, rc, merged_sz);
-		}
+		set_left_chunk_size(h, rc, merged_sz);

 		c = lc;
 	}
@@ -206,6 +195,10 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 	/* Must fit in a 32 bit count of HUNK_UNIT */
 	CHECK(bytes / CHUNK_UNIT <= 0xffffffffU);

+	/* Reserve the final marker chunk's header */
+	CHECK(bytes > heap_footer_bytes(bytes));
+	bytes -= heap_footer_bytes(bytes);
+
 	/* Round the start up, the end down */
 	uintptr_t addr = ROUND_UP(mem, CHUNK_UNIT);
 	uintptr_t end = ROUND_DOWN((uint8_t *)mem + bytes, CHUNK_UNIT);
@@ -231,10 +224,18 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
 		h->buckets[i].next = 0;
 	}

+	/* chunk containing our struct z_heap */
 	set_chunk_size(h, 0, chunk0_size);
 	set_chunk_used(h, 0, true);

+	/* chunk containing the free heap */
 	set_chunk_size(h, chunk0_size, buf_sz - chunk0_size);
 	set_left_chunk_size(h, chunk0_size, chunk0_size);
+
+	/* the end marker chunk */
+	set_chunk_size(h, buf_sz, 0);
+	set_left_chunk_size(h, buf_sz, buf_sz - chunk0_size);
+	set_chunk_used(h, buf_sz, true);
+
 	free_list_add(h, chunk0_size);
 }
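
Putting the three pieces of sys_heap_init() together, the chunk array ends up laid out roughly as follows (a sketch using the local names above; the exact byte counts depend on the header size):

/*
 *  index:   0                chunk0_size                      buf_sz
 *           |                |                                |
 *           +----------------+--------------------------------+-------------+
 *           | chunk 0:       | free chunk:                    | end marker: |
 *           | struct z_heap  | size = buf_sz - chunk0_size,   | size = 0,   |
 *           | + buckets,     | left = chunk0_size,            | used        |
 *           | used           | on the free list               |             |
 *           +----------------+--------------------------------+-------------+
 *
 * With index 0 and index buf_sz both permanently marked used,
 * left_chunk()/right_chunk() of any allocatable chunk always resolve
 * to a real header, which is what lets last_chunk() go away.
 */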


@@ -65,9 +65,19 @@ struct z_heap {
 	struct z_heap_bucket buckets[0];
 };

+static inline bool big_heap_chunks(size_t chunks)
+{
+	return sizeof(void *) > 4 || chunks > 0x7fff;
+}
+
+static inline bool big_heap_bytes(size_t bytes)
+{
+	return big_heap_chunks(bytes / CHUNK_UNIT);
+}
+
 static inline bool big_heap(struct z_heap *h)
 {
-	return sizeof(size_t) > 4 || h->len > 0x7fff;
+	return big_heap_chunks(h->len);
 }

 static inline chunk_unit_t *chunk_buf(struct z_heap *h)
@@ -92,7 +102,7 @@ static inline size_t chunk_field(struct z_heap *h, chunkid_t c,
 static inline void chunk_set(struct z_heap *h, chunkid_t c,
 			     enum chunk_fields f, chunkid_t val)
 {
-	CHECK(c < h->len);
+	CHECK(c <= h->len);

 	chunk_unit_t *buf = chunk_buf(h);
 	void *cmem = &buf[c];
@@ -189,6 +199,11 @@ static inline size_t chunk_header_bytes(struct z_heap *h)
 	return big_heap(h) ? 8 : 4;
 }

+static inline size_t heap_footer_bytes(size_t size)
+{
+	return big_heap_bytes(size) ? 8 : 4;
+}
+
 static inline size_t chunksz(size_t bytes)
 {
 	return (bytes + CHUNK_UNIT - 1) / CHUNK_UNIT;
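
A hedged worked example of the new helpers (sizes are arbitrary; CHUNK_UNIT is assumed to be 8 bytes, as elsewhere in this heap code):

/* On a 32-bit target (sizeof(void *) == 4):
 *
 *   big_heap_bytes(64 * 1024)   -> false  (8192 chunks  <= 0x7fff)
 *   big_heap_bytes(1024 * 1024) -> true   (131072 chunks > 0x7fff)
 *
 *   heap_footer_bytes(64 * 1024)   -> 4   small heap, 4-byte footer header
 *   heap_footer_bytes(1024 * 1024) -> 8   big heap, 8-byte footer header
 *
 *   chunksz(100) -> 13   (100 bytes round up to 13 chunk units)
 *
 * sys_heap_init() subtracts heap_footer_bytes(bytes) before rounding,
 * so the end marker's header always fits inside the caller's buffer.
 */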