kernel: demote K_THREAD_STACK_BUFFER() to private
This macro is slated for complete removal, as it is not possible on arches with an MPU stack guard to know the true buffer bounds without also knowing the runtime state of its associated thread. Removing it completely would be too invasive at this point in the 1.14 release, so demote it to a private kernel Z_ API instead. The current internal uses of the macro will not cause any undue harm; we just don't want any external code depending on it. The final work to remove this (and overhaul stack specification in general) will take place in 1.15 in the context of #14269.

Fixes: #14766

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
b379030724
commit
4e5c093e66
|
@ -67,7 +67,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *pStackMem = K_THREAD_STACK_BUFFER(stack);
|
||||
char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
|
||||
Z_ASSERT_VALID_PRIO(priority, pEntry);
|
||||
|
||||
char *stackEnd;
|
||||
|
|
|
@ -55,7 +55,7 @@ static ALWAYS_INLINE void z_irq_setup(void)
|
|||
z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
|
||||
|
||||
_kernel.irq_stack =
|
||||
K_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
}
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
|
|
@ -55,7 +55,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
void *parameter1, void *parameter2, void *parameter3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *pStackMem = K_THREAD_STACK_BUFFER(stack);
|
||||
char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
|
||||
char *stackEnd;
|
||||
/* Offset between the top of stack and the high end of stack area. */
|
||||
u32_t top_of_stack_offset = 0U;
|
||||
|
|
|
@ -41,10 +41,10 @@ static ALWAYS_INLINE void z_InterruptStackSetup(void)
|
|||
{
|
||||
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) && \
|
||||
defined(CONFIG_USERSPACE)
|
||||
u32_t msp = (u32_t)(K_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
u32_t msp = (u32_t)(Z_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
CONFIG_ISR_STACK_SIZE - MPU_GUARD_ALIGN_AND_SIZE);
|
||||
#else
|
||||
u32_t msp = (u32_t)(K_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
u32_t msp = (u32_t)(Z_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
CONFIG_ISR_STACK_SIZE);
|
||||
#endif
|
||||
|
||||
|
|
|
@ -63,11 +63,11 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
|
|||
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) && \
|
||||
defined(CONFIG_USERSPACE)
|
||||
start_of_main_stack =
|
||||
K_THREAD_STACK_BUFFER(main_stack) + main_stack_size -
|
||||
Z_THREAD_STACK_BUFFER(main_stack) + main_stack_size -
|
||||
MPU_GUARD_ALIGN_AND_SIZE;
|
||||
#else
|
||||
start_of_main_stack =
|
||||
K_THREAD_STACK_BUFFER(main_stack) + main_stack_size;
|
||||
Z_THREAD_STACK_BUFFER(main_stack) + main_stack_size;
|
||||
#endif
|
||||
start_of_main_stack = (void *)STACK_ROUND_DOWN(start_of_main_stack);
|
||||
|
||||
|
|
|
@ -36,7 +36,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
void *arg1, void *arg2, void *arg3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *stack_memory = K_THREAD_STACK_BUFFER(stack);
|
||||
char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
|
||||
Z_ASSERT_VALID_PRIO(priority, thread_func);
|
||||
|
||||
struct init_stack_frame *iframe;
|
||||
|
|
|
@ -32,7 +32,7 @@ void k_cpu_atomic_idle(unsigned int key);
|
|||
static ALWAYS_INLINE void kernel_arch_init(void)
|
||||
{
|
||||
_kernel.irq_stack =
|
||||
K_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
|
|
|
@ -51,7 +51,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
int priority, unsigned int options)
|
||||
{
|
||||
|
||||
char *stack_memory = K_THREAD_STACK_BUFFER(stack);
|
||||
char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
|
||||
|
||||
Z_ASSERT_VALID_PRIO(priority, thread_func);
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
void *arg1, void *arg2, void *arg3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *stack_memory = K_THREAD_STACK_BUFFER(stack);
|
||||
char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
|
||||
Z_ASSERT_VALID_PRIO(priority, thread_func);
|
||||
|
||||
struct __esf *stack_init;
|
||||
|
|
|
@ -28,7 +28,7 @@ void k_cpu_atomic_idle(unsigned int key);
|
|||
static ALWAYS_INLINE void kernel_arch_init(void)
|
||||
{
|
||||
_kernel.irq_stack =
|
||||
K_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void
|
||||
|
|
|
@ -68,7 +68,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
struct _x86_initial_frame *initial_frame;
|
||||
|
||||
Z_ASSERT_VALID_PRIO(priority, entry);
|
||||
stack_buf = K_THREAD_STACK_BUFFER(stack);
|
||||
stack_buf = Z_THREAD_STACK_BUFFER(stack);
|
||||
z_new_thread_init(thread, stack_buf, stack_size, priority, options);
|
||||
|
||||
#if CONFIG_X86_USERSPACE
|
||||
|
|
|
@ -36,7 +36,7 @@ extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
|
|||
static inline void kernel_arch_init(void)
|
||||
{
|
||||
_kernel.nested = 0;
|
||||
_kernel.irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
_kernel.irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
CONFIG_ISR_STACK_SIZE;
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
|
||||
|
|
|
@ -29,7 +29,7 @@ void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
|
|||
void *args[] = { entry, p1, p2, p3 };
|
||||
int nargs = 4;
|
||||
int eflags = 0x200;
|
||||
char *base = K_THREAD_STACK_BUFFER(stack);
|
||||
char *base = Z_THREAD_STACK_BUFFER(stack);
|
||||
char *top = base + sz;
|
||||
|
||||
z_new_thread_init(t, base, sz, prio, opts);
|
||||
|
|
|
@ -48,7 +48,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
void *p1, void *p2, void *p3,
|
||||
int priority, unsigned int options)
|
||||
{
|
||||
char *pStack = K_THREAD_STACK_BUFFER(stack);
|
||||
char *pStack = Z_THREAD_STACK_BUFFER(stack);
|
||||
|
||||
/* Align stack end to maximum alignment requirement. */
|
||||
char *stackEnd = (char *)ROUND_DOWN(pStack + stackSize, 16);
|
||||
|
|
|
@ -63,7 +63,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
|
|||
k_thread_entry_t entry, void *p1, void *p2, void *p3,
|
||||
int prio, unsigned int opts)
|
||||
{
|
||||
char *base = K_THREAD_STACK_BUFFER(stack);
|
||||
char *base = Z_THREAD_STACK_BUFFER(stack);
|
||||
char *top = base + sz;
|
||||
|
||||
/* Align downward. The API as specified requires a runtime check. */
|
||||
|
|
|
@ -70,7 +70,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
|
|||
cpu0->nested = 0;
|
||||
|
||||
#if CONFIG_XTENSA_ASM2
|
||||
cpu0->irq_stack = (K_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
cpu0->irq_stack = (Z_THREAD_STACK_BUFFER(_interrupt_stack) +
|
||||
CONFIG_ISR_STACK_SIZE);
|
||||
|
||||
/* The asm2 scheme keeps the kernel pointer in MISC0 for easy
|
||||
|
@ -90,7 +90,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_INIT_STACKS
|
||||
memset(K_THREAD_STACK_BUFFER(_interrupt_stack), 0xAA,
|
||||
memset(Z_THREAD_STACK_BUFFER(_interrupt_stack), 0xAA,
|
||||
CONFIG_ISR_STACK_SIZE);
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -502,7 +502,7 @@ static void cc1200_rx(struct device *dev)
|
|||
}
|
||||
|
||||
net_analyze_stack("CC1200 Rx Fiber stack",
|
||||
K_THREAD_STACK_BUFFER(cc1200->rx_stack),
|
||||
Z_THREAD_STACK_BUFFER(cc1200->rx_stack),
|
||||
K_THREAD_STACK_SIZEOF(cc1200->rx_stack));
|
||||
continue;
|
||||
flush:
|
||||
|
|
|
@ -671,7 +671,7 @@ static void cc2520_rx(int arg)
|
|||
}
|
||||
|
||||
net_analyze_stack("CC2520 Rx Fiber stack",
|
||||
K_THREAD_STACK_BUFFER(cc2520->cc2520_rx_stack),
|
||||
Z_THREAD_STACK_BUFFER(cc2520->cc2520_rx_stack),
|
||||
K_THREAD_STACK_SIZEOF(cc2520->cc2520_rx_stack));
|
||||
continue;
|
||||
flush:
|
||||
|
|
|
@ -579,7 +579,7 @@ static inline void mcr20a_rx(struct mcr20a_context *mcr20a, u8_t len)
|
|||
}
|
||||
|
||||
net_analyze_stack("MCR20A Rx Fiber stack",
|
||||
K_THREAD_STACK_BUFFER(mcr20a->mcr20a_rx_stack),
|
||||
Z_THREAD_STACK_BUFFER(mcr20a->mcr20a_rx_stack),
|
||||
K_THREAD_STACK_SIZEOF(mcr20a->mcr20a_rx_stack));
|
||||
return;
|
||||
out:
|
||||
|
|
|
@ -121,7 +121,7 @@ static void nrf5_rx_thread(void *arg1, void *arg2, void *arg3)
|
|||
if (CONFIG_IEEE802154_DRIVER_LOG_LEVEL >= LOG_LEVEL_DBG) {
|
||||
net_analyze_stack(
|
||||
"nRF5 rx stack",
|
||||
K_THREAD_STACK_BUFFER(nrf5_radio->rx_stack),
|
||||
Z_THREAD_STACK_BUFFER(nrf5_radio->rx_stack),
|
||||
K_THREAD_STACK_SIZEOF(nrf5_radio->rx_stack));
|
||||
}
|
||||
|
||||
|
|
|
@ -163,7 +163,7 @@ static struct winc1500_data w1500_data;
|
|||
static void stack_stats(void)
|
||||
{
|
||||
net_analyze_stack("WINC1500 stack",
|
||||
K_THREAD_STACK_BUFFER(winc1500_stack),
|
||||
Z_THREAD_STACK_BUFFER(winc1500_stack),
|
||||
K_THREAD_STACK_SIZEOF(winc1500_stack));
|
||||
}
|
||||
|
||||
|
|
|
@ -342,7 +342,7 @@ static inline void k_obj_free(void *obj)
|
|||
/** @} */
|
||||
|
||||
/* Using typedef deliberately here, this is quite intended to be an opaque
|
||||
* type. K_THREAD_STACK_BUFFER() should be used to access the data within.
|
||||
* type.
|
||||
*
|
||||
* The purpose of this data type is to clearly distinguish between the
|
||||
* declared symbol for a stack (of type k_thread_stack_t) and the underlying
|
||||
|
@ -474,8 +474,8 @@ typedef struct _thread_base _thread_base_t;
|
|||
#if defined(CONFIG_THREAD_STACK_INFO)
|
||||
/* Contains the stack information of a thread */
|
||||
struct _thread_stack_info {
|
||||
/* Stack Start - Identical to K_THREAD_STACK_BUFFER() on the stack
|
||||
* object. Represents thread-writable stack area without any extras.
|
||||
/* Stack start - Represents the start address of the thread-writable
|
||||
* stack area.
|
||||
*/
|
||||
u32_t start;
|
||||
|
||||
|
@ -4585,7 +4585,7 @@ extern void z_timer_expiration_handler(struct _timeout *t);
|
|||
#define K_THREAD_STACK_MEMBER(sym, size) Z_ARCH_THREAD_STACK_MEMBER(sym, size)
|
||||
#define K_THREAD_STACK_SIZEOF(sym) Z_ARCH_THREAD_STACK_SIZEOF(sym)
|
||||
#define K_THREAD_STACK_RESERVED Z_ARCH_THREAD_STACK_RESERVED
|
||||
static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
||||
static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
||||
{
|
||||
return Z_ARCH_THREAD_STACK_BUFFER(sym);
|
||||
}
|
||||
|
@ -4600,7 +4600,8 @@ static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
|||
*
|
||||
* The declared symbol will always be a k_thread_stack_t which can be passed to
|
||||
* k_thread_create(), but should otherwise not be manipulated. If the buffer
|
||||
* inside needs to be examined, use K_THREAD_STACK_BUFFER().
|
||||
* inside needs to be examined, examine thread->stack_info for the associated
|
||||
* thread object to obtain the boundaries.
|
||||
*
|
||||
* It is legal to precede this definition with the 'static' keyword.
|
||||
*
|
||||
|
@ -4697,16 +4698,14 @@ static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
|||
/**
|
||||
* @brief Get a pointer to the physical stack buffer
|
||||
*
|
||||
* Convenience macro to get at the real underlying stack buffer used by
|
||||
* the CPU. Guaranteed to be a character pointer of size K_THREAD_STACK_SIZEOF.
|
||||
* This is really only intended for diagnostic tools which want to examine
|
||||
* stack memory contents.
|
||||
* This macro is deprecated. If a stack buffer needs to be examined, the
|
||||
* bounds should be obtained from the associated thread's stack_info struct.
|
||||
*
|
||||
* @param sym Declared stack symbol name
|
||||
* @return The buffer itself, a char *
|
||||
* @req K-TSTACK-001
|
||||
*/
|
||||
static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
||||
static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
||||
{
|
||||
return (char *)sym;
|
||||
}
|
||||
|
|
|
@ -85,7 +85,7 @@ static inline void stack_analyze(const char *name, const char *stack,
|
|||
#define STACK_ANALYZE(name, sym) \
|
||||
do { \
|
||||
stack_analyze(name, \
|
||||
K_THREAD_STACK_BUFFER(sym), \
|
||||
Z_THREAD_STACK_BUFFER(sym), \
|
||||
K_THREAD_STACK_SIZEOF(sym)); \
|
||||
} while (false)
|
||||
|
||||
|
|
|
@ -384,7 +384,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
|
|||
init_idle_thread(_idle_thread1, _idle_stack1);
|
||||
_kernel.cpus[1].idle_thread = _idle_thread1;
|
||||
_kernel.cpus[1].id = 1;
|
||||
_kernel.cpus[1].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack1)
|
||||
_kernel.cpus[1].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack1)
|
||||
+ CONFIG_ISR_STACK_SIZE;
|
||||
#endif
|
||||
|
||||
|
@ -392,7 +392,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
|
|||
init_idle_thread(_idle_thread2, _idle_stack2);
|
||||
_kernel.cpus[2].idle_thread = _idle_thread2;
|
||||
_kernel.cpus[2].id = 2;
|
||||
_kernel.cpus[2].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack2)
|
||||
_kernel.cpus[2].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack2)
|
||||
+ CONFIG_ISR_STACK_SIZE;
|
||||
#endif
|
||||
|
||||
|
@ -400,7 +400,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
|
|||
init_idle_thread(_idle_thread3, _idle_stack3);
|
||||
_kernel.cpus[3].idle_thread = _idle_thread3;
|
||||
_kernel.cpus[3].id = 3;
|
||||
_kernel.cpus[3].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack3)
|
||||
_kernel.cpus[3].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack3)
|
||||
+ CONFIG_ISR_STACK_SIZE;
|
||||
#endif
|
||||
|
||||
|
|
|
@ -369,7 +369,7 @@ void z_setup_new_thread(struct k_thread *new_thread,
|
|||
*/
|
||||
new_thread->userspace_local_data =
|
||||
(struct _thread_userspace_local_data *)
|
||||
(K_THREAD_STACK_BUFFER(stack) + stack_size);
|
||||
(Z_THREAD_STACK_BUFFER(stack) + stack_size);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
|
|
@ -205,12 +205,12 @@ void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
|
|||
|
||||
sr.cpu = cpu_num;
|
||||
sr.fn = fn;
|
||||
sr.stack_top = K_THREAD_STACK_BUFFER(stack) + sz;
|
||||
sr.stack_top = Z_THREAD_STACK_BUFFER(stack) + sz;
|
||||
sr.arg = arg;
|
||||
sr.vecbase = vb;
|
||||
sr.alive = &alive_flag;
|
||||
|
||||
appcpu_top = K_THREAD_STACK_BUFFER(stack) + sz;
|
||||
appcpu_top = Z_THREAD_STACK_BUFFER(stack) + sz;
|
||||
|
||||
start_rec = &sr;
|
||||
|
||||
|
|
|
@ -215,7 +215,7 @@ static inline void mgmt_run_callbacks(struct mgmt_event_entry *mgmt_event)
|
|||
|
||||
#ifdef CONFIG_NET_DEBUG_MGMT_EVENT_STACK
|
||||
net_analyze_stack("Net MGMT event stack",
|
||||
K_THREAD_STACK_BUFFER(mgmt_stack),
|
||||
Z_THREAD_STACK_BUFFER(mgmt_stack),
|
||||
K_THREAD_STACK_SIZEOF(mgmt_stack));
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -2882,7 +2882,7 @@ static int cmd_net_stacks(const struct shell *shell, size_t argc,
|
|||
ARG_UNUSED(argv);
|
||||
|
||||
for (info = __net_stack_start; info != __net_stack_end; info++) {
|
||||
net_analyze_stack_get_values(K_THREAD_STACK_BUFFER(info->stack),
|
||||
net_analyze_stack_get_values(Z_THREAD_STACK_BUFFER(info->stack),
|
||||
info->size, &pcnt, &unused);
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS)
|
||||
|
@ -2910,7 +2910,7 @@ static int cmd_net_stacks(const struct shell *shell, size_t argc,
|
|||
}
|
||||
|
||||
#if defined(CONFIG_INIT_STACKS)
|
||||
net_analyze_stack_get_values(K_THREAD_STACK_BUFFER(_main_stack),
|
||||
net_analyze_stack_get_values(Z_THREAD_STACK_BUFFER(_main_stack),
|
||||
K_THREAD_STACK_SIZEOF(_main_stack),
|
||||
&pcnt, &unused);
|
||||
PR("%s [%s] stack size %d/%d bytes unused %u usage %d/%d (%u %%)\n",
|
||||
|
@ -2918,7 +2918,7 @@ static int cmd_net_stacks(const struct shell *shell, size_t argc,
|
|||
CONFIG_MAIN_STACK_SIZE, unused,
|
||||
CONFIG_MAIN_STACK_SIZE - unused, CONFIG_MAIN_STACK_SIZE, pcnt);
|
||||
|
||||
net_analyze_stack_get_values(K_THREAD_STACK_BUFFER(_interrupt_stack),
|
||||
net_analyze_stack_get_values(Z_THREAD_STACK_BUFFER(_interrupt_stack),
|
||||
K_THREAD_STACK_SIZEOF(_interrupt_stack),
|
||||
&pcnt, &unused);
|
||||
PR("%s [%s] stack size %d/%d bytes unused %u usage %d/%d (%u %%)\n",
|
||||
|
@ -2926,7 +2926,7 @@ static int cmd_net_stacks(const struct shell *shell, size_t argc,
|
|||
CONFIG_ISR_STACK_SIZE, unused,
|
||||
CONFIG_ISR_STACK_SIZE - unused, CONFIG_ISR_STACK_SIZE, pcnt);
|
||||
|
||||
net_analyze_stack_get_values(K_THREAD_STACK_BUFFER(sys_work_q_stack),
|
||||
net_analyze_stack_get_values(Z_THREAD_STACK_BUFFER(sys_work_q_stack),
|
||||
K_THREAD_STACK_SIZEOF(sys_work_q_stack),
|
||||
&pcnt, &unused);
|
||||
PR("%s [%s] stack size %d/%d bytes unused %u usage %d/%d (%u %%)\n",
|
||||
|
|
|
@ -473,7 +473,7 @@ static void read_other_stack(void)
|
|||
k_sem_take(&uthread_start_sem, K_FOREVER);
|
||||
|
||||
/* Try to directly read the stack of the other thread. */
|
||||
ptr = (unsigned int *)K_THREAD_STACK_BUFFER(uthread_stack);
|
||||
ptr = (unsigned int *)Z_THREAD_STACK_BUFFER(uthread_stack);
|
||||
expect_fault = true;
|
||||
expected_reason = REASON_HW_EXCEPTION;
|
||||
BARRIER();
|
||||
|
@ -506,7 +506,7 @@ static void write_other_stack(void)
|
|||
k_sem_take(&uthread_start_sem, K_FOREVER);
|
||||
|
||||
/* Try to directly write the stack of the other thread. */
|
||||
ptr = (unsigned int *) K_THREAD_STACK_BUFFER(uthread_stack);
|
||||
ptr = (unsigned int *) Z_THREAD_STACK_BUFFER(uthread_stack);
|
||||
expect_fault = true;
|
||||
expected_reason = REASON_HW_EXCEPTION;
|
||||
BARRIER();
|
||||
|
@ -1017,9 +1017,9 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
|
|||
|
||||
|
||||
/* This API is being removed just whine about it for now */
|
||||
if (K_THREAD_STACK_BUFFER(stack_obj) != stack_start) {
|
||||
printk("WARNING: K_THREAD_STACK_BUFFER() reports %p\n",
|
||||
K_THREAD_STACK_BUFFER(stack_obj));
|
||||
if (Z_THREAD_STACK_BUFFER(stack_obj) != stack_start) {
|
||||
printk("WARNING: Z_THREAD_STACK_BUFFER() reports %p\n",
|
||||
Z_THREAD_STACK_BUFFER(stack_obj));
|
||||
}
|
||||
|
||||
if (z_arch_is_user_context()) {
|
||||
|
|
Loading…
Reference in a new issue