Revert "posix: use sys_sem instead of k_spinlock for pool synch"

This reverts commit 6e66aa1f7c.

It also fixes an indentation issue in the original code
which triggered a checkpatch warning.

This PR:
https://github.com/zephyrproject-rtos/zephyr/pull/71718
introduced a regression that broke CI:
https://github.com/zephyrproject-rtos/zephyr/issues/71814
Let's revert it until a better fix is ready.

Signed-off-by: Alberto Escolar Piedras <alberto.escolar.piedras@nordicsemi.no>
Alberto Escolar Piedras authored 2024-04-23 16:06:38 +02:00; committed by Anas Nashif
parent c9faea2c2b
commit 84e7107b32
4 changed files with 161 additions and 160 deletions
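
Before the per-file diffs, a note on what is being swapped back: the reverted commit had replaced spinlock-protected critical sections with sys_sem-based ones across the POSIX layer. The sketch below contrasts the two scoped-locking idioms, assuming the K_SPINLOCK/K_SPINLOCK_BREAK macros from <zephyr/spinlock.h> and the SYS_SEM_DEFINE/SYS_SEM_LOCK/SYS_SEM_LOCK_BREAK macros from <zephyr/sys/sem.h>; the lock and counter names are hypothetical and not taken from the patch.

/* Illustrative sketch only; my_spinlock, my_sem and my_count are hypothetical. */
#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/sem.h>

static struct k_spinlock my_spinlock; /* the idiom this revert restores */
static SYS_SEM_DEFINE(my_sem, 1, 1);  /* the idiom the reverted commit introduced */
static int my_count;

int bump_with_spinlock(void)
{
    int ret = 0;

    K_SPINLOCK(&my_spinlock) {
        if (my_count < 0) {
            ret = -EINVAL;
            K_SPINLOCK_BREAK; /* early exit; the lock is still released */
        }
        my_count++;
    }

    return ret;
}

int bump_with_sys_sem(void)
{
    int ret = 0;

    SYS_SEM_LOCK(&my_sem) {
        if (my_count < 0) {
            ret = -EINVAL;
            SYS_SEM_LOCK_BREAK; /* same early-exit pattern as K_SPINLOCK_BREAK */
        }
        my_count++;
    }

    return ret;
}

The practical difference is that a held k_spinlock masks interrupts and must not block, while the sys_sem variant pends on a semaphore; the diffs below mechanically swap one idiom for the other (or for raw k_spin_lock()/k_spin_unlock() calls) in the key, mutex, thread-pool, and rwlock code.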

File 1 of 4: pthread_key implementation

@@ -11,7 +11,6 @@
 #include <zephyr/posix/pthread.h>
 #include <zephyr/sys/bitarray.h>
 #include <zephyr/sys/__assert.h>
-#include <zephyr/sys/sem.h>
 
 struct pthread_key_data {
     sys_snode_t node;
@@ -20,7 +19,7 @@ struct pthread_key_data {
 LOG_MODULE_REGISTER(pthread_key, CONFIG_PTHREAD_KEY_LOG_LEVEL);
 
-static SYS_SEM_DEFINE(pthread_key_lock, 1, 1);
+static struct k_spinlock pthread_key_lock;
 
 /* This is non-standard (i.e. an implementation detail) */
 #define PTHREAD_KEY_INITIALIZER (-1)
@@ -129,40 +128,42 @@ int pthread_key_create(pthread_key_t *key,
 int pthread_key_delete(pthread_key_t key)
 {
     size_t bit;
-    int ret = 0;
-    pthread_key_obj *key_obj = NULL;
+    __unused int ret;
+    pthread_key_obj *key_obj;
     struct pthread_key_data *key_data;
     sys_snode_t *node_l, *next_node_l;
+    k_spinlock_key_t key_key;
 
-    SYS_SEM_LOCK(&pthread_key_lock) {
-        key_obj = get_posix_key(key);
-        if (key_obj == NULL) {
-            ret = EINVAL;
-            SYS_SEM_LOCK_BREAK;
-        }
+    key_key = k_spin_lock(&pthread_key_lock);
 
-        /* Delete thread-specific elements associated with the key */
-        SYS_SLIST_FOR_EACH_NODE_SAFE(&(key_obj->key_data_l), node_l, next_node_l) {
-            /* Remove the object from the list key_data_l */
-            key_data = (struct pthread_key_data *)sys_slist_get(&(key_obj->key_data_l));
-            /* Deallocate the object's memory */
-            k_free((void *)key_data);
-            LOG_DBG("Freed key data %p for key %x in thread %x", key_data, key,
-                pthread_self());
-        }
-
-        bit = posix_key_to_offset(key_obj);
-        ret = sys_bitarray_free(&posix_key_bitarray, 1, bit);
-        __ASSERT_NO_MSG(ret == 0);
+    key_obj = get_posix_key(key);
+    if (key_obj == NULL) {
+        k_spin_unlock(&pthread_key_lock, key_key);
+        return EINVAL;
     }
 
-    if (ret == 0) {
-        LOG_DBG("Deleted key %p (%x)", key_obj, key);
+    /* Delete thread-specific elements associated with the key */
+    SYS_SLIST_FOR_EACH_NODE_SAFE(&(key_obj->key_data_l),
+                                 node_l, next_node_l) {
+        /* Remove the object from the list key_data_l */
+        key_data = (struct pthread_key_data *)
+            sys_slist_get(&(key_obj->key_data_l));
+        /* Deallocate the object's memory */
+        k_free((void *)key_data);
+        LOG_DBG("Freed key data %p for key %x in thread %x", key_data, key, pthread_self());
     }
 
-    return ret;
+    bit = posix_key_to_offset(key_obj);
+    ret = sys_bitarray_free(&posix_key_bitarray, 1, bit);
+    __ASSERT_NO_MSG(ret == 0);
+
+    k_spin_unlock(&pthread_key_lock, key_key);
+
+    LOG_DBG("Deleted key %p (%x)", key_obj, key);
+
+    return 0;
 }
 
 /**
@@ -172,10 +173,12 @@ int pthread_key_delete(pthread_key_t key)
  */
 int pthread_setspecific(pthread_key_t key, const void *value)
 {
-    pthread_key_obj *key_obj = NULL;
+    pthread_key_obj *key_obj;
     struct posix_thread *thread;
     struct pthread_key_data *key_data;
-    sys_snode_t *node_l = NULL;
+    pthread_thread_data *thread_spec_data;
+    k_spinlock_key_t key_key;
+    sys_snode_t *node_l;
     int retval = 0;
 
     thread = to_posix_thread(pthread_self());
@@ -187,37 +190,37 @@ int pthread_setspecific(pthread_key_t key, const void *value)
      * If the key is already in the list, re-assign its value.
      * Else add the key to the thread's list.
      */
-    SYS_SEM_LOCK(&pthread_key_lock) {
-        key_obj = get_posix_key(key);
-        if (key_obj == NULL) {
-            retval = EINVAL;
-            SYS_SEM_LOCK_BREAK;
-        }
+    key_key = k_spin_lock(&pthread_key_lock);
+
+    key_obj = get_posix_key(key);
+    if (key_obj == NULL) {
+        k_spin_unlock(&pthread_key_lock, key_key);
+        return EINVAL;
+    }
 
+    SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) {
+        thread_spec_data = (pthread_thread_data *)node_l;
+
+        if (thread_spec_data->key == key_obj) {
+            /* Key is already present so
+             * associate thread specific data
+             */
+            thread_spec_data->spec_data = (void *)value;
+            LOG_DBG("Paired key %x to value %p for thread %x", key, value,
+                pthread_self());
+            goto out;
+        }
+    }
-    SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) {
-        pthread_thread_data *thread_spec_data = (pthread_thread_data *)node_l;
-
-        if (thread_spec_data->key == key_obj) {
-            /* Key is already present so associate thread specific data */
-            thread_spec_data->spec_data = (void *)value;
-            LOG_DBG("Paired key %x to value %p for thread %x", key, value,
-                pthread_self());
-            break;
-        }
-    }
-
-    if (node_l != NULL) {
-        /* Key is already present, so we are done */
-        SYS_SEM_LOCK_BREAK;
-    }
 
     /* Key and data need to be added */
+    if (node_l == NULL) {
         key_data = k_malloc(sizeof(struct pthread_key_data));
 
         if (key_data == NULL) {
             LOG_DBG("Failed to allocate key data for key %x", key);
             retval = ENOMEM;
-            SYS_SEM_LOCK_BREAK;
+            goto out;
         }
 
         LOG_DBG("Allocated key data %p for key %x in thread %x", key_data, key,
@@ -236,6 +239,9 @@ int pthread_setspecific(pthread_key_t key, const void *value)
         LOG_DBG("Paired key %x to value %p for thread %x", key, value, pthread_self());
     }
 
+out:
+    k_spin_unlock(&pthread_key_lock, key_key);
+
     return retval;
 }
@@ -251,30 +257,33 @@ void *pthread_getspecific(pthread_key_t key)
     pthread_thread_data *thread_spec_data;
     void *value = NULL;
     sys_snode_t *node_l;
+    k_spinlock_key_t key_key;
 
     thread = to_posix_thread(pthread_self());
     if (thread == NULL) {
         return NULL;
     }
 
-    SYS_SEM_LOCK(&pthread_key_lock) {
-        key_obj = get_posix_key(key);
-        if (key_obj == NULL) {
-            value = NULL;
-            SYS_SEM_LOCK_BREAK;
-        }
+    key_key = k_spin_lock(&pthread_key_lock);
 
-        /* Traverse the list of keys set by the thread, looking for key */
+    key_obj = get_posix_key(key);
+    if (key_obj == NULL) {
+        k_spin_unlock(&pthread_key_lock, key_key);
+        return NULL;
+    }
 
-        SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) {
-            thread_spec_data = (pthread_thread_data *)node_l;
-            if (thread_spec_data->key == key_obj) {
-                /* Key is present, so get the set thread data */
-                value = thread_spec_data->spec_data;
-                break;
-            }
-        }
-    }
+    /* Traverse the list of keys set by the thread, looking for key */
+    SYS_SLIST_FOR_EACH_NODE(&(thread->key_list), node_l) {
+        thread_spec_data = (pthread_thread_data *)node_l;
+
+        if (thread_spec_data->key == key_obj) {
+            /* Key is present, so get the set thread data */
+            value = thread_spec_data->spec_data;
+            break;
+        }
+    }
+
+    k_spin_unlock(&pthread_key_lock, key_key);
 
     return value;
 }
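
Note the shape of the restored pthread_key code above: instead of a scoped locking macro it uses the raw k_spin_lock()/k_spin_unlock() API, so every early-return path must pass the saved k_spinlock_key_t back to the unlock call. A minimal sketch of that pattern, with a hypothetical lock name and lookup_thing() helper:

#include <errno.h>
#include <zephyr/kernel.h>

static struct k_spinlock obj_lock;

extern void *lookup_thing(int id); /* hypothetical lookup done under the lock */

int update_thing(int id)
{
    k_spinlock_key_t key;
    void *obj;

    key = k_spin_lock(&obj_lock);

    obj = lookup_thing(id);
    if (obj == NULL) {
        /* every early return must release the lock with the saved key */
        k_spin_unlock(&obj_lock, key);
        return EINVAL;
    }

    /* ... update obj while the lock is held (no blocking calls here) ... */

    k_spin_unlock(&obj_lock, key);
    return 0;
}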

File 2 of 4: pthread_mutex implementation

@@ -12,11 +12,10 @@
 #include <zephyr/logging/log.h>
 #include <zephyr/posix/pthread.h>
 #include <zephyr/sys/bitarray.h>
-#include <zephyr/sys/sem.h>
 
 LOG_MODULE_REGISTER(pthread_mutex, CONFIG_PTHREAD_MUTEX_LOG_LEVEL);
 
-static SYS_SEM_DEFINE(lock, 1, 1);
+static struct k_spinlock pthread_mutex_spinlock;
 
 int64_t timespec_to_timeoutms(const struct timespec *abstime);
@@ -107,41 +106,35 @@ struct k_mutex *to_posix_mutex(pthread_mutex_t *mu)
 static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout)
 {
+    int type;
+    size_t bit;
     int ret = 0;
-    int type = -1;
-    size_t bit = -1;
-    size_t lock_count = -1;
-    struct k_mutex *m = NULL;
-    struct k_thread *owner = NULL;
+    struct k_mutex *m;
+    k_spinlock_key_t key;
 
-    SYS_SEM_LOCK(&lock) {
-        m = to_posix_mutex(mu);
-        if (m == NULL) {
-            ret = EINVAL;
-            SYS_SEM_LOCK_BREAK;
-        }
+    key = k_spin_lock(&pthread_mutex_spinlock);
 
-        LOG_DBG("Locking mutex %p with timeout %llx", m, timeout.ticks);
-        bit = posix_mutex_to_offset(m);
-        type = posix_mutex_type[bit];
-        owner = m->owner;
-        lock_count = m->lock_count;
-    }
+    m = to_posix_mutex(mu);
+    if (m == NULL) {
+        k_spin_unlock(&pthread_mutex_spinlock, key);
+        return EINVAL;
+    }
 
-    if (ret != 0) {
-        goto handle_error;
-    }
+    LOG_DBG("Locking mutex %p with timeout %llx", m, timeout.ticks);
 
-    if (owner == k_current_get()) {
+    bit = posix_mutex_to_offset(m);
+    type = posix_mutex_type[bit];
+
+    if (m->owner == k_current_get()) {
         switch (type) {
         case PTHREAD_MUTEX_NORMAL:
             if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
+                k_spin_unlock(&pthread_mutex_spinlock, key);
                 LOG_DBG("Timeout locking mutex %p", m);
-                ret = EBUSY;
-                break;
+                return EBUSY;
             }
 
             /* On most POSIX systems, this usually results in an infinite loop */
+            k_spin_unlock(&pthread_mutex_spinlock, key);
             LOG_DBG("Attempt to relock non-recursive mutex %p", m);
             do {
                 (void)k_sleep(K_FOREVER);
@@ -149,7 +142,7 @@ static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout)
             CODE_UNREACHABLE;
             break;
         case PTHREAD_MUTEX_RECURSIVE:
-            if (lock_count >= MUTEX_MAX_REC_LOCK) {
+            if (m->lock_count >= MUTEX_MAX_REC_LOCK) {
                 LOG_DBG("Mutex %p locked recursively too many times", m);
                 ret = EAGAIN;
             }
@@ -164,6 +157,7 @@ static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout)
             break;
         }
     }
+    k_spin_unlock(&pthread_mutex_spinlock, key);
 
     if (ret == 0) {
         ret = k_mutex_lock(m, timeout);
@@ -177,7 +171,6 @@ static int acquire_mutex(pthread_mutex_t *mu, k_timeout_t timeout)
         }
     }
 
-handle_error:
     if (ret < 0) {
         LOG_DBG("k_mutex_unlock() failed: %d", ret);
         ret = -ret;
File 3 of 4: pthread (thread pool) implementation

@@ -16,7 +16,6 @@
 #include <zephyr/sys/atomic.h>
 #include <zephyr/posix/pthread.h>
 #include <zephyr/posix/unistd.h>
-#include <zephyr/sys/sem.h>
 #include <zephyr/sys/slist.h>
 #include <zephyr/sys/util.h>
@@ -88,7 +87,7 @@ static sys_dlist_t posix_thread_q[] = {
     SYS_DLIST_STATIC_INIT(&posix_thread_q[POSIX_THREAD_DONE_Q]),
 };
 static struct posix_thread posix_thread_pool[CONFIG_MAX_PTHREAD_COUNT];
-static SYS_SEM_DEFINE(pthread_pool_lock, 1, 1);
+static struct k_spinlock pthread_pool_lock;
 static int pthread_concurrency;
 
 static inline void posix_thread_q_set(struct posix_thread *t, enum posix_thread_qid qid)
@@ -220,7 +219,7 @@ void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void
     struct posix_thread *t = NULL;
     struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread_self());
         BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c));
         __ASSERT_NO_MSG(t != NULL);
@@ -237,7 +236,7 @@ void __z_pthread_cleanup_pop(int execute)
     struct __pthread_cleanup *c = NULL;
     struct posix_thread *t = NULL;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread_self());
         __ASSERT_NO_MSG(t != NULL);
         node = sys_slist_get(&t->cleanup_list);
@@ -477,6 +476,7 @@ static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_w
 static void posix_thread_finalize(struct posix_thread *t, void *retval)
 {
     sys_snode_t *node_l;
+    k_spinlock_key_t key;
     pthread_key_obj *key_obj;
     pthread_thread_data *thread_spec_data;
@@ -491,11 +491,11 @@ static void posix_thread_finalize(struct posix_thread *t, void *retval)
     }
 
     /* move thread from run_q to done_q */
-    SYS_SEM_LOCK(&pthread_pool_lock) {
-        sys_dlist_remove(&t->q_node);
-        posix_thread_q_set(t, POSIX_THREAD_DONE_Q);
-        t->retval = retval;
-    }
+    key = k_spin_lock(&pthread_pool_lock);
+    sys_dlist_remove(&t->q_node);
+    posix_thread_q_set(t, POSIX_THREAD_DONE_Q);
+    t->retval = retval;
+    k_spin_unlock(&pthread_pool_lock, key);
 
     /* trigger recycle work */
     (void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));
@@ -526,22 +526,22 @@ static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
 static void posix_thread_recycle(void)
 {
+    k_spinlock_key_t key;
     struct posix_thread *t;
     struct posix_thread *safe_t;
     sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
-        SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t,
-                                          q_node) {
-            if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) {
-                /* thread has not been joined yet */
-                continue;
-            }
-
-            sys_dlist_remove(&t->q_node);
-            sys_dlist_append(&recyclables, &t->q_node);
-        }
+    key = k_spin_lock(&pthread_pool_lock);
+    SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&posix_thread_q[POSIX_THREAD_DONE_Q], t, safe_t, q_node) {
+        if (t->attr.detachstate == PTHREAD_CREATE_JOINABLE) {
+            /* thread has not been joined yet */
+            continue;
+        }
+
+        sys_dlist_remove(&t->q_node);
+        sys_dlist_append(&recyclables, &t->q_node);
     }
+    k_spin_unlock(&pthread_pool_lock, key);
 
     if (sys_dlist_is_empty(&recyclables)) {
         return;
@@ -557,12 +557,12 @@ static void posix_thread_recycle(void)
         }
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
-        while (!sys_dlist_is_empty(&recyclables)) {
-            t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node);
-            posix_thread_q_set(t, POSIX_THREAD_READY_Q);
-        }
-    }
+    key = k_spin_lock(&pthread_pool_lock);
+    while (!sys_dlist_is_empty(&recyclables)) {
+        t = CONTAINER_OF(sys_dlist_get(&recyclables), struct posix_thread, q_node);
+        posix_thread_q_set(t, POSIX_THREAD_READY_Q);
+    }
+    k_spin_unlock(&pthread_pool_lock, key);
 }
 
 /**
@@ -587,7 +587,7 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
     /* reclaim resources greedily */
     posix_thread_recycle();
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         if (!sys_dlist_is_empty(&posix_thread_q[POSIX_THREAD_READY_Q])) {
             t = CONTAINER_OF(sys_dlist_get(&posix_thread_q[POSIX_THREAD_READY_Q]),
                              struct posix_thread, q_node);
@@ -603,7 +603,7 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
     err = pthread_barrier_init(&barrier, NULL, 2);
     if (err != 0) {
         /* cannot allocate barrier. move thread back to ready_q */
-        SYS_SEM_LOCK(&pthread_pool_lock) {
+        K_SPINLOCK(&pthread_pool_lock) {
             sys_dlist_remove(&t->q_node);
             posix_thread_q_set(t, POSIX_THREAD_READY_Q);
         }
@@ -625,7 +625,7 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
     }
 
     if (err != 0) {
         /* cannot allocate pthread attributes (e.g. stack) */
-        SYS_SEM_LOCK(&pthread_pool_lock) {
+        K_SPINLOCK(&pthread_pool_lock) {
             sys_dlist_remove(&t->q_node);
             posix_thread_q_set(t, POSIX_THREAD_READY_Q);
         }
@@ -673,7 +673,7 @@ int pthread_getconcurrency(void)
 {
     int ret = 0;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         ret = pthread_concurrency;
     }
@@ -690,7 +690,7 @@ int pthread_setconcurrency(int new_level)
         return EAGAIN;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         pthread_concurrency = new_level;
     }
@@ -705,8 +705,8 @@ int pthread_setconcurrency(int new_level)
 int pthread_setcancelstate(int state, int *oldstate)
 {
     int ret = 0;
-    struct posix_thread *t;
     bool cancel_pending = false;
+    struct posix_thread *t = NULL;
     bool cancel_type = PTHREAD_CANCEL_ENABLE;
 
     if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) {
@@ -714,11 +714,11 @@ int pthread_setcancelstate(int state, int *oldstate)
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread_self());
         if (t == NULL) {
             ret = EINVAL;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (oldstate != NULL) {
@@ -753,11 +753,11 @@ int pthread_setcanceltype(int type, int *oldtype)
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread_self());
         if (t == NULL) {
             ret = EINVAL;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (oldtype != NULL) {
@@ -776,16 +776,16 @@ int pthread_setcanceltype(int type, int *oldtype)
  */
 void pthread_testcancel(void)
 {
-    struct posix_thread *t;
     bool cancel_pended = false;
+    struct posix_thread *t = NULL;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread_self());
         if (t == NULL) {
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
         if (t->attr.cancelstate != PTHREAD_CANCEL_ENABLE) {
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
         if (t->attr.cancelpending) {
             cancel_pended = true;
@@ -810,17 +810,17 @@ int pthread_cancel(pthread_t pthread)
     bool cancel_type = PTHREAD_CANCEL_DEFERRED;
     struct posix_thread *t = NULL;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread);
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (!__attr_is_initialized(&t->attr)) {
             /* thread has already terminated */
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         t->attr.cancelpending = true;
@@ -852,11 +852,11 @@ int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_para
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread);
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         new_prio = posix_to_zephyr_priority(param->sched_priority, policy);
@@ -892,11 +892,11 @@ int pthread_setschedprio(pthread_t thread, int prio)
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(thread);
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         new_prio = posix_to_zephyr_priority(prio, policy);
@@ -965,16 +965,16 @@ int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *pa
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread);
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (!__attr_is_initialized(&t->attr)) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         param->sched_priority =
@@ -999,7 +999,7 @@ int pthread_once(pthread_once_t *once, void (*init_func)(void))
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         if (!_once->flag) {
             run_init_func = true;
             _once->flag = true;
@@ -1023,10 +1023,10 @@ void pthread_exit(void *retval)
 {
     struct posix_thread *self = NULL;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         self = to_posix_thread(pthread_self());
         if (self == NULL) {
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         /* Mark a thread as cancellable before exiting */
@@ -1060,11 +1060,11 @@ int pthread_join(pthread_t pthread, void **status)
         return EDEADLK;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread);
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         LOG_DBG("Pthread %p joining..", &t->thread);
@@ -1072,12 +1072,12 @@ int pthread_join(pthread_t pthread, void **status)
         if (t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
             /* undefined behaviour */
             ret = EINVAL;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         /*
@@ -1124,18 +1124,18 @@ int pthread_detach(pthread_t pthread)
     int ret = 0;
     struct posix_thread *t;
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread);
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (posix_thread_q_get(t) == POSIX_THREAD_READY_Q ||
             t->attr.detachstate != PTHREAD_CREATE_JOINABLE) {
             LOG_DBG("Pthread %p cannot be detached", &t->thread);
             ret = EINVAL;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         t->attr.detachstate = PTHREAD_CREATE_DETACHED;
@@ -1437,11 +1437,11 @@ int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&pthread_pool_lock) {
+    K_SPINLOCK(&pthread_pool_lock) {
         t = to_posix_thread(pthread_self());
         if (t == NULL) {
             ret = ESRCH;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         if (oset != NULL) {
@@ -1449,7 +1449,7 @@ int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT
         }
 
         if (set == NULL) {
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         switch (how) {

File 4 of 4: pthread_rwlock implementation

@@ -11,7 +11,6 @@
 #include <zephyr/logging/log.h>
 #include <zephyr/posix/pthread.h>
 #include <zephyr/sys/bitarray.h>
-#include <zephyr/sys/sem.h>
 
 #define CONCURRENT_READER_LIMIT (CONFIG_MAX_PTHREAD_COUNT + 1)
@@ -33,7 +32,7 @@ static uint32_t write_lock_acquire(struct posix_rwlock *rwl, int32_t timeout);
 LOG_MODULE_REGISTER(pthread_rwlock, CONFIG_PTHREAD_RWLOCK_LOG_LEVEL);
 
-static SYS_SEM_DEFINE(posix_rwlock_lock, 1, 1);
+static struct k_spinlock posix_rwlock_spinlock;
 static struct posix_rwlock posix_rwlock_pool[CONFIG_MAX_PTHREAD_RWLOCK_COUNT];
 SYS_BITARRAY_DEFINE_STATIC(posix_rwlock_bitarray, CONFIG_MAX_PTHREAD_RWLOCK_COUNT);
@@ -151,10 +150,10 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
         return EINVAL;
     }
 
-    SYS_SEM_LOCK(&posix_rwlock_lock) {
+    K_SPINLOCK(&posix_rwlock_spinlock) {
         if (rwl->wr_owner != NULL) {
             ret = EBUSY;
-            SYS_SEM_LOCK_BREAK;
+            K_SPINLOCK_BREAK;
         }
 
         bit = posix_rwlock_to_offset(rwl);