kernel: userspace: aligned memory allocation for dynamic objects

This allows allocating dynamic kernel objects with memory alignment
requirements. The first candidate is thread objects which,
on some architectures, must be aligned for saving/restoring
registers.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
This commit is contained in:
Daniel Leung 2020-12-15 13:50:48 -08:00 committed by Andrew Boie
parent 0c9f9691c4
commit fe477ea6d3
2 changed files with 92 additions and 13 deletions

View file

@ -267,12 +267,37 @@ __syscall void *k_object_alloc(enum k_objects otype);
* and may be freed later by passing the actual object pointer (found
* in the returned z_object's 'name' member) to k_object_free().
*
* @param align Required memory alignment for the allocated object
* @param size Size of the allocated object
* @return NULL on insufficient memory
* @return A pointer to the associated z_object that is installed in the
* kernel object tables
*/
struct z_object *z_dynamic_object_create(size_t size);
struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size);
/**
 * Allocate memory and install as a generic kernel object
 *
 * This is a low-level function to allocate some memory, and register that
 * allocated memory in the kernel object lookup tables with type K_OBJ_ANY.
 * Initialization state and thread permissions will be cleared. The
 * returned z_object's data value will be uninitialized.
 *
 * Most users will want to use k_object_alloc() instead.
 *
 * Memory allocated will be drawn from the calling thread's resource pool
 * and may be freed later by passing the actual object pointer (found
 * in the returned z_object's 'name' member) to k_object_free().
 *
 * @param size Size of the allocated object
 * @return NULL on insufficient memory
 * @return A pointer to the associated z_object that is installed in the
 * kernel object tables
 */
static inline struct z_object *z_dynamic_object_create(size_t size)
{
	/* NOTE(review): an align of 0 presumably requests the allocator's
	 * default (minimal) alignment — confirm against the
	 * z_dynamic_object_aligned_create() implementation.
	 */
	return z_dynamic_object_aligned_create(0, size);
}
/**
* Free a kernel object previously allocated with k_object_alloc()
@ -293,6 +318,15 @@ static inline void *z_impl_k_object_alloc(enum k_objects otype)
return NULL;
}
/* Stub variant (presumably built when dynamic kernel objects are
 * disabled — confirm the surrounding #ifdef): allocation is not
 * supported, so fail unconditionally.
 */
static inline struct z_object *z_dynamic_object_aligned_create(size_t align,
							       size_t size)
{
	ARG_UNUSED(size);
	ARG_UNUSED(align);

	return NULL;
}
static inline struct z_object *z_dynamic_object_create(size_t size)
{
ARG_UNUSED(size);

View file

@ -110,11 +110,35 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
#endif /* CONFIG_GEN_PRIV_STACKS */
#ifdef CONFIG_DYNAMIC_OBJECTS
/*
* Note that dyn_obj->data is where the kernel object resides
* so it is the one that actually needs to be aligned.
* Due to the need to get the fields inside struct dyn_obj
* from kernel object pointers (i.e. from data[]), the offset
* from data[] needs to be fixed at build time. Therefore,
* data[] is declared with __aligned(), such that when dyn_obj
* is allocated with alignment, data[] is also aligned.
* Due to this requirement, data[] needs to be aligned with
* the maximum alignment needed for all kernel objects
* (hence the following DYN_OBJ_DATA_ALIGN).
*/
/* Alignment needed for the data[] member when it holds a struct k_thread.
 * Architectures may require stricter-than-pointer alignment via
 * ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT; otherwise pointer alignment is used.
 *
 * NOTE(review): "DYMANIC" looks like a misspelling of "DYNAMIC", but the
 * name must match the arch headers that define it — renaming requires a
 * coordinated tree-wide change.
 */
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
#endif

/* Maximum alignment needed across all kernel object types that can be
 * dynamically allocated (per the comment above, data[] must be aligned
 * for the worst case; currently only k_thread has a special requirement).
 */
#define DYN_OBJ_DATA_ALIGN \
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))
/* Bookkeeping wrapper around a dynamically allocated kernel object.
 * The object itself lives in the trailing flexible array member.
 */
struct dyn_obj {
	struct z_object kobj;
	sys_dnode_t obj_list;
	struct rbnode node; /* must be immediately before data member */

	/* The object itself.  Aligned with DYN_OBJ_DATA_ALIGN — the maximum
	 * alignment over all kernel object types — so that, as the comment
	 * above explains, the offset of data[] within this struct is a
	 * build-time constant regardless of which object type is stored
	 * here.  (Using only DYN_OBJ_DATA_ALIGN_K_THREAD would leave
	 * DYN_OBJ_DATA_ALIGN unused and contradict that comment.)
	 */
	uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN);
};
extern struct z_object *z_object_gperf_find(const void *obj);
@ -156,6 +180,26 @@ static size_t obj_size_get(enum k_objects otype)
return ret;
}
/* Return the memory alignment required when dynamically allocating a
 * kernel object of type @a otype.  Pointer alignment is the baseline;
 * thread objects may need more on some architectures.
 */
static size_t obj_align_get(enum k_objects otype)
{
	size_t align = sizeof(void *);

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT
		align = ARCH_DYMANIC_OBJ_K_THREAD_ALIGNMENT;
#endif
		break;
	default:
		/* baseline alignment already set */
		break;
	}

	return align;
}
static bool node_lessthan(struct rbnode *a, struct rbnode *b)
{
return a < b;
@ -166,6 +210,13 @@ static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
return CONTAINER_OF(node, struct dyn_obj, node);
}
/* Map a kernel object pointer (the data[] member of its enclosing
 * struct dyn_obj) back to the rbtree node embedded in that wrapper.
 */
static inline struct rbnode *dyn_obj_to_node(void *obj)
{
	return &CONTAINER_OF(obj, struct dyn_obj, data)->node;
}
static struct dyn_obj *dyn_object_find(void *obj)
{
struct rbnode *node;
@ -176,7 +227,7 @@ static struct dyn_obj *dyn_object_find(void *obj)
* so just a little arithmetic is necessary to locate the
* corresponding struct rbnode
*/
node = (struct rbnode *)((char *)obj - sizeof(struct rbnode));
node = dyn_obj_to_node(obj);
k_spinlock_key_t key = k_spin_lock(&lists_lock);
if (rb_contains(&obj_rb_tree, node)) {
@ -252,11 +303,11 @@ static void thread_idx_free(uintptr_t tidx)
sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}
struct z_object *z_dynamic_object_create(size_t size)
struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
{
struct dyn_obj *dyn;
dyn = z_thread_malloc(sizeof(*dyn) + size);
dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
if (dyn == NULL) {
LOG_ERR("could not allocate kernel object, out of memory");
return NULL;
@ -288,13 +339,6 @@ void *z_impl_k_object_alloc(enum k_objects otype)
switch (otype) {
case K_OBJ_THREAD:
/* aligned allocator required for X86 and X86_64 */
if (IS_ENABLED(CONFIG_X86) || IS_ENABLED(CONFIG_X86_64)) {
LOG_ERR("object type '%s' forbidden on x86 and x86_64",
otype_to_str(otype));
return NULL;
}
if (!thread_idx_alloc(&tidx)) {
LOG_ERR("out of free thread indexes");
return NULL;
@ -313,7 +357,8 @@ void *z_impl_k_object_alloc(enum k_objects otype)
break;
}
zo = z_dynamic_object_create(obj_size_get(otype));
zo = z_dynamic_object_aligned_create(obj_align_get(otype),
obj_size_get(otype));
if (zo == NULL) {
return NULL;
}