syscall: rename z_object_find -> k_object_find

Rename internal API to not use z_/Z_.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif 2023-09-27 10:49:28 +00:00 committed by Carles Cufí
parent 27d74e95c9
commit c25d0804f0
16 changed files with 34 additions and 34 deletions
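
The rename is purely mechanical: k_object_find() keeps the signature and semantics of z_object_find(), returning the kernel object's metadata or NULL when the pointer is not a tracked kernel object. As a hedged illustration of the call-site pattern after this commit (the helper name and the chosen object type are made up for the example; the pattern mirrors k_futex_find_data() and get_k_mutex() further down in this diff):

/* Illustrative sketch, not part of this commit. Relies on the kernel's
 * internal kernel-object declarations (struct k_object, k_object_find()).
 */
static struct k_object *find_checked(const void *obj, enum k_objects otype)
{
	struct k_object *ko = k_object_find(obj);   /* was z_object_find() */

	/* NULL means 'obj' is not the address of a tracked kernel object */
	if (ko == NULL || ko->type != otype) {
		return NULL;
	}
	return ko;
}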

@@ -102,7 +102,7 @@ void z_dump_object_error(int retval, const void *obj,
* @return Kernel object's metadata, or NULL if the parameter wasn't the
* memory address of a kernel object
*/
-struct k_object *z_object_find(const void *obj);
+struct k_object *k_object_find(const void *obj);
typedef void (*_wordlist_cb_func_t)(struct k_object *ko, void *context);
@@ -448,7 +448,7 @@ static inline int k_object_validation_check(struct k_object *ko,
#define K_SYSCALL_IS_OBJ(ptr, type, init) \
K_SYSCALL_VERIFY_MSG(k_object_validation_check( \
-z_object_find((const void *)ptr), \
+k_object_find((const void *)ptr), \
(const void *)ptr, \
type, init) == 0, "access denied")
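
K_SYSCALL_IS_OBJ() sits under the object-validation checks used by the z_vrfy_* handlers, so every handler that validates an object pointer now goes through k_object_find(). A minimal sketch of such a handler, assuming the Z_OOPS()/_OBJ_INIT_TRUE usage seen later in this diff (the handler and its z_impl_ counterpart are hypothetical names, not functions from the tree):

/* Hypothetical verification handler, shown only to illustrate the macro. */
static inline int z_vrfy_example_give(struct k_sem *sem)
{
	/* Oopses the calling thread unless 'sem' is a tracked, initialized
	 * K_OBJ_SEM it has permission on; internally this is
	 * k_object_validation_check(k_object_find(sem), ...) == 0.
	 */
	Z_OOPS(K_SYSCALL_IS_OBJ(sem, K_OBJ_SEM, _OBJ_INIT_TRUE));

	return z_impl_example_give(sem);
}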

@@ -18,7 +18,7 @@
PROVIDE(z_object_gperf_find = .);
PROVIDE(z_object_gperf_wordlist_foreach = .);
#else
-PROVIDE(z_object_find = .);
+PROVIDE(k_object_find = .);
PROVIDE(k_object_wordlist_foreach = .);
#endif
#endif

@@ -50,7 +50,7 @@ union k_object_data {
};
/* Table generated by gperf, these objects are retrieved via
-* z_object_find() */
+* k_object_find() */
struct k_object {
void *name;
uint8_t perms[CONFIG_MAX_THREAD_BYTES];

@@ -152,7 +152,7 @@ int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
if (IS_ENABLED(CONFIG_DYNAMIC_THREAD_ALLOC)) {
#ifdef CONFIG_USERSPACE
-if (z_object_find(stack)) {
+if (k_object_find(stack)) {
k_object_free(stack);
} else {
k_free(stack);

@@ -16,7 +16,7 @@ static struct z_futex_data *k_futex_find_data(struct k_futex *futex)
{
struct k_object *obj;
-obj = z_object_find(futex);
+obj = k_object_find(futex);
if (obj == NULL || obj->type != K_OBJ_FUTEX) {
return NULL;
}

@@ -1878,7 +1878,7 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
*/
static bool thread_obj_validate(struct k_thread *thread)
{
-struct k_object *ko = z_object_find(thread);
+struct k_object *ko = k_object_find(thread);
int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
switch (ret) {

@@ -360,7 +360,7 @@ static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
{
#ifdef CONFIG_THREAD_NAME
size_t len;
-struct k_object *ko = z_object_find(thread);
+struct k_object *ko = k_object_find(thread);
/* Special case: we allow reading the names of initialized threads
* even if we don't have permission on them
@@ -715,7 +715,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
-return z_object_find(stack) != NULL;
+return k_object_find(stack) != NULL;
}
k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
@@ -733,7 +733,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
/* No need to check z_stack_is_user_capable(), it won't be in the
* object table if it isn't
*/
-stack_object = z_object_find(stack);
+stack_object = k_object_find(stack);
Z_OOPS(K_SYSCALL_VERIFY_MSG(k_object_validation_check(stack_object, stack,
K_OBJ_THREAD_STACK_ELEMENT,
_OBJ_INIT_FALSE) == 0,

@@ -119,7 +119,7 @@ struct perm_ctx {
*/
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
-struct k_object *obj = z_object_find(stack);
+struct k_object *obj = k_object_find(stack);
__ASSERT(obj != NULL, "stack object not found");
__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
@@ -475,7 +475,7 @@ void k_object_free(void *obj)
}
}
-struct k_object *z_object_find(const void *obj)
+struct k_object *k_object_find(const void *obj)
{
struct k_object *ret;
@@ -516,7 +516,7 @@ static unsigned int thread_index_get(struct k_thread *thread)
{
struct k_object *ko;
-ko = z_object_find(thread);
+ko = k_object_find(thread);
if (ko == NULL) {
return -1;
@@ -691,7 +691,7 @@ void z_dump_object_error(int retval, const void *obj, struct k_object *ko,
void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
-struct k_object *ko = z_object_find(object);
+struct k_object *ko = k_object_find(object);
if (ko != NULL) {
k_thread_perms_set(ko, thread);
@@ -700,7 +700,7 @@ void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
void k_object_access_revoke(const void *object, struct k_thread *thread)
{
-struct k_object *ko = z_object_find(object);
+struct k_object *ko = k_object_find(object);
if (ko != NULL) {
k_thread_perms_clear(ko, thread);
@@ -714,7 +714,7 @@ void z_impl_k_object_release(const void *object)
void k_object_access_all_grant(const void *object)
{
-struct k_object *ko = z_object_find(object);
+struct k_object *ko = k_object_find(object);
if (ko != NULL) {
ko->flags |= K_OBJ_FLAG_PUBLIC;
@@ -766,7 +766,7 @@ void k_object_init(const void *obj)
* finalizes it
*/
-ko = z_object_find(obj);
+ko = k_object_find(obj);
if (ko == NULL) {
/* Supervisor threads can ignore rules about kernel objects
* and may declare them on stacks, etc. Such objects will never
@@ -781,7 +781,7 @@ void k_object_init(const void *obj)
void k_object_recycle(const void *obj)
{
-struct k_object *ko = z_object_find(obj);
+struct k_object *ko = k_object_find(obj);
if (ko != NULL) {
(void)memset(ko->perms, 0, sizeof(ko->perms));
@@ -795,7 +795,7 @@ void k_object_uninit(const void *obj)
struct k_object *ko;
/* See comments in k_object_init() */
-ko = z_object_find(obj);
+ko = k_object_find(obj);
if (ko == NULL) {
return;
}

@@ -16,7 +16,7 @@ static struct k_object *validate_kernel_object(const void *obj,
struct k_object *ko;
int ret;
-ko = z_object_find(obj);
+ko = k_object_find(obj);
/* This can be any kernel object and it doesn't have to be
* initialized
@@ -50,7 +50,7 @@ bool k_object_is_valid(const void *obj, enum k_objects otype)
* syscall_dispatch.c declares weak handlers results in build errors if these
* are located in userspace.c. Just put in a separate file.
*
-* To avoid double z_object_find() lookups, we don't call the implementation
+* To avoid double k_object_find() lookups, we don't call the implementation
* function, but call a level deeper.
*/
static inline void z_vrfy_k_object_access_grant(const void *object,
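
The hunk above cuts off at the handler's signature. To make the "call a level deeper" note concrete: instead of invoking z_impl_k_object_access_grant(), which would run k_object_find() a second time on the same pointer, the handler grants directly on the metadata it already looked up. A hedged sketch of that shape (illustrative body only, not the file's actual code):

static inline void z_vrfy_k_object_access_grant(const void *object,
						struct k_thread *thread)
{
	/* Single lookup; the real handler also validates 'thread'. */
	struct k_object *ko = k_object_find(object);

	Z_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "access denied"));

	/* Grant on 'ko' directly rather than calling
	 * z_impl_k_object_access_grant(), which would repeat the lookup.
	 */
	k_thread_perms_set(ko, thread);
}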

@@ -13,7 +13,7 @@ static struct k_mutex *get_k_mutex(struct sys_mutex *mutex)
{
struct k_object *obj;
-obj = z_object_find(mutex);
+obj = k_object_find(mutex);
if (obj == NULL || obj->type != K_OBJ_SYS_MUTEX) {
return NULL;
}

@@ -752,7 +752,7 @@ void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
}
#ifndef CONFIG_DYNAMIC_OBJECTS
-struct k_object *z_object_find(const void *obj)
+struct k_object *k_object_find(const void *obj)
ALIAS_OF(z_object_gperf_find);
void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)

@@ -83,7 +83,7 @@ def process_line(line, fp):
return
# Set the lookup function to static inline so it gets rolled into
-# z_object_find(), nothing else will use it
+# k_object_find(), nothing else will use it
if re.search(args.pattern + " [*]$", line):
fp.write("static inline " + line)
return

@@ -481,7 +481,7 @@ ZTEST(mem_protect_kobj, test_thread_has_residual_permissions)
* @ingroup kernel_memprotect_tests
*
* @see k_object_access_grant(), k_object_access_revoke(),
-* z_object_find()
+* k_object_find()
*/
ZTEST(mem_protect_kobj, test_kobject_access_grant_to_invalid_thread)
{
@@ -1069,12 +1069,12 @@ ZTEST(mem_protect_kobj, test_mark_thread_exit_uninitialized)
k_thread_join(&child_thread, K_FOREVER);
/* check thread is uninitialized after its exit */
-ko = z_object_find(&child_thread);
+ko = k_object_find(&child_thread);
ret = z_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_FALSE);
zassert_equal(ret, _OBJ_INIT_FALSE);
/* check stack is uninitialized after thread exit */
-ko = z_object_find(child_stack);
+ko = k_object_find(child_stack);
ret = z_object_validate(ko, K_OBJ_ANY, _OBJ_INIT_FALSE);
zassert_equal(ret, _OBJ_INIT_FALSE);
}

@@ -34,9 +34,9 @@ static int test_object(struct k_sem *sem, int retval)
/* Expected to fail; bypass k_object_validation_check() so we don't
* fill the logs with spam
*/
-ret = z_object_validate(z_object_find(sem), K_OBJ_SEM, 0);
+ret = z_object_validate(k_object_find(sem), K_OBJ_SEM, 0);
} else {
-ret = k_object_validation_check(z_object_find(sem), sem,
+ret = k_object_validation_check(k_object_find(sem), sem,
K_OBJ_SEM, 0);
}
@@ -179,7 +179,7 @@ ZTEST(object_validation, test_no_ref_dyn_kobj_release_mem)
k_object_access_revoke(test_dyn_mutex, thread);
/* check object was released, when no threads have access to it */
-ret = z_object_validate(z_object_find(test_dyn_mutex), K_OBJ_MUTEX, 0);
+ret = z_object_validate(k_object_find(test_dyn_mutex), K_OBJ_MUTEX, 0);
zassert_true(ret == -EBADF, "Dynamic kernel object not released");
}

@@ -861,7 +861,7 @@ static struct k_sem recycle_sem;
* @details Test recycle valid/invalid kernel object, see if
* perms_count changes as expected.
*
-* @see k_object_recycle(), z_object_find()
+* @see k_object_recycle(), k_object_find()
*
* @ingroup kernel_memprotect_tests
*/
@@ -874,12 +874,12 @@ ZTEST(userspace, test_object_recycle)
/* Validate recycle invalid objects, after recycling this invalid
* object, perms_count should finally still be 1.
*/
-ko = z_object_find(&dummy);
+ko = k_object_find(&dummy);
zassert_true(ko == NULL, "not an invalid object");
k_object_recycle(&dummy);
-ko = z_object_find(&recycle_sem);
+ko = k_object_find(&recycle_sem);
(void)memset(ko->perms, 0xFF, sizeof(ko->perms));
k_object_recycle(&recycle_sem);

@@ -335,7 +335,7 @@ void scenario_entry(void *stack_obj, size_t obj_size, size_t reported_size,
#ifdef CONFIG_USERSPACE
struct k_object *zo;
-zo = z_object_find(stack_obj);
+zo = k_object_find(stack_obj);
if (zo != NULL) {
is_user = true;
#ifdef CONFIG_GEN_PRIV_STACKS