kernel: Move per-cpu _kernel_t fields into separate struct
When in SMP mode, the nested/irq_stack/current fields are specific to the current CPU and not to the kernel as a whole, so we need an array of these. Place them in a _cpu_t struct and implement an _arch_curr_cpu() function to retrieve the pointer.

When not in SMP mode, the legacy fields are unioned with the first _cpu_t record. This preserves compatibility with legacy assembly on other platforms. Long term, all users, including uniprocessor architectures, should be updated to use the new scheme.

Fundamentally this is just a renaming: the structure layout and runtime code do not change on any existing platform, and won't until someone defines a second CPU.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 9c62cc677d
commit e694656345
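The compatibility claim above rests on the union trick: in a non-SMP build the legacy _kernel.nested/irq_stack/current names must alias _kernel.cpus[0] exactly, so the offsets already baked into assembly stay valid. A minimal standalone sketch of the idea (C11 anonymous members; types simplified, not the actual Zephyr header):

    #include <stddef.h>
    #include <assert.h>

    struct k_thread;                        /* opaque, as in the real header */

    struct _cpu {
            unsigned int nested;            /* nested interrupt count */
            char *irq_stack;                /* interrupt stack pointer base */
            struct k_thread *current;       /* currently scheduled thread */
    };

    struct _kernel {
            union {
                    struct _cpu cpus[1];    /* stand-in for CONFIG_MP_NUM_CPUS */
                    struct {                /* legacy pre-SMP field names */
                            unsigned int nested;
                            char *irq_stack;
                            struct k_thread *current;
                    };
            };
    };

    /* Both spellings name the same storage, which is why the structure
     * layout does not change and the old assembly offsets keep working.
     */
    static_assert(offsetof(struct _kernel, nested) ==
                  offsetof(struct _kernel, cpus[0].nested),
                  "legacy fields must alias cpus[0]");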
@@ -232,7 +232,7 @@ _switch_restore_pc:
  */
 .align 4
 _handle_excint:
-	EXCINT_HANDLER MISC0, ___kernel_t_nested_OFFSET, ___kernel_t_irq_stack_OFFSET
+	EXCINT_HANDLER MISC0, ___cpu_t_nested_OFFSET, ___cpu_t_irq_stack_OFFSET
 
 /* Define the actual vectors for the hardware-defined levels with
  * DEF_EXCINT.  These load a C handler address and jump to our handler
@@ -140,16 +140,16 @@ static void dump_stack(int *stack)
 #if CONFIG_XTENSA_ASM2
 static inline void *restore_stack(void *interrupted_stack)
 {
-	if (!_is_preempt(_kernel.current)) {
+	if (!_is_preempt(_current)) {
 		return interrupted_stack;
 	}
 
 	int key = irq_lock();
 
-	_kernel.current->switch_handle = interrupted_stack;
-	_kernel.current = _get_next_ready_thread();
+	_current->switch_handle = interrupted_stack;
+	_current = _get_next_ready_thread();
 
-	void *ret = _kernel.current->switch_handle;
+	void *ret = _current->switch_handle;
 
 	irq_unlock(key);
 
@@ -28,6 +28,15 @@ extern void _xt_coproc_init(void);
 
 extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
 
+static ALWAYS_INLINE _cpu_t *_arch_curr_cpu(void)
+{
+	void *val;
+
+	__asm__ volatile("rsr.misc0 %0" : "=r"(val));
+
+	return val;
+}
+
 /**
  *
  * @brief Performs architecture-specific initialization
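For readers unfamiliar with the Xtensa idiom: rsr.misc0 reads back the MISC0 scratch register that kernel_arch_init() (next hunk) fills with wsr.MISC0, so fetching the per-CPU record costs one special-register read. A hedged illustration pairing the two halves; the helper names here are mine, not Zephyr's:

    static inline void set_curr_cpu(_cpu_t *cpu)     /* hypothetical helper */
    {
            /* publish the per-CPU pointer; rsync orders the register write */
            __asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(cpu));
    }

    static inline _cpu_t *get_curr_cpu(void)     /* mirrors _arch_curr_cpu() */
    {
            void *val;

            __asm__ volatile("rsr.misc0 %0" : "=r"(val));
            return val;     /* implicit void * to _cpu_t * conversion in C */
    }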
@@ -40,11 +49,13 @@ extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
  */
 static ALWAYS_INLINE void kernel_arch_init(void)
 {
-	_kernel.nested = 0;
+	_cpu_t *cpu0 = &_kernel.cpus[0];
+
+	cpu0->nested = 0;
 
 #if CONFIG_XTENSA_ASM2
-	_kernel.irq_stack = (K_THREAD_STACK_BUFFER(_interrupt_stack) +
-			     CONFIG_ISR_STACK_SIZE);
+	cpu0->irq_stack = (K_THREAD_STACK_BUFFER(_interrupt_stack) +
+			   CONFIG_ISR_STACK_SIZE);
 
 	/* The asm2 scheme keeps the kernel pointer in MISC0 for easy
 	 * access.  That saves 4 bytes of immediate value to store the
@@ -52,9 +63,8 @@ static ALWAYS_INLINE void kernel_arch_init(void)
 	 * this record is a per-CPU thing and having it stored in a SR
 	 * already is a big win.
 	 */
-	void *cpuptr = &_kernel;
+	__asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(cpu0));
 
-	__asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(cpuptr));
 #endif
 
 #if !defined(CONFIG_XTENSA_ASM2) && XCHAL_CP_NUM > 0
@@ -102,7 +112,7 @@ static inline void _IntLibInit(void)
 }
 #endif
 
-#define _is_in_isr() (_kernel.nested != 0)
+#define _is_in_isr() (_arch_curr_cpu()->nested != 0)
 
 #endif /* _ASMLANGUAGE */
 
@@ -11,6 +11,7 @@
 #include <xtensa/config/core-isa.h>
 #include <xtensa/corebits.h>
 
+#include <kernel_structs.h>
 #include <string.h>
 #include <toolchain/gcc.h>
 #include <zephyr/types.h>
@@ -60,6 +61,13 @@ void __attribute__((section(".iram1"))) __start(void)
 	/* Disable CPU1 while we figure out how to have SMP in Zephyr. */
 	*app_cpu_config_reg &= ~DPORT_APPCPU_CLKGATE_EN;
 
+	/* Initialize the architecture CPU pointer.  Some of the
+	 * initialization code wants a valid _current before
+	 * kernel_arch_init() is invoked.
+	 */
+	__asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));
+
+
 	/* Start Zephyr */
 	_Cstart();
 
@@ -17,14 +17,20 @@
 
 GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)
 
+#ifndef CONFIG_SMP
 GEN_OFFSET_SYM(_kernel_t, current);
+GEN_OFFSET_SYM(_kernel_t, nested);
+GEN_OFFSET_SYM(_kernel_t, irq_stack);
+#endif
 
+GEN_OFFSET_SYM(_cpu_t, current);
+GEN_OFFSET_SYM(_cpu_t, nested);
+GEN_OFFSET_SYM(_cpu_t, irq_stack);
+
 #if defined(CONFIG_THREAD_MONITOR)
 GEN_OFFSET_SYM(_kernel_t, threads);
 #endif
 
-GEN_OFFSET_SYM(_kernel_t, nested);
-GEN_OFFSET_SYM(_kernel_t, irq_stack);
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 GEN_OFFSET_SYM(_kernel_t, idle);
 #endif
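GEN_OFFSET_SYM(_cpu_t, nested) is what produces the ___cpu_t_nested_OFFSET absolute symbol consumed by the EXCINT_HANDLER change in the first hunk. A rough sketch of the mechanism, simplified from Zephyr's offset-generation macros rather than copied from them:

    #include <stddef.h>

    struct _cpu {                  /* stand-in for the kernel_structs.h type */
            unsigned int nested;
            char *irq_stack;
    };
    typedef struct _cpu _cpu_t;

    /* Emit an absolute assembler symbol carrying a struct offset, so
     * assembly can write e.g. "l32i a3, a2, ___cpu_t_nested_OFFSET".
     */
    #define GEN_OFFSET_SYM_SKETCH(type, member)                      \
            __asm__(".globl __" #type "_" #member "_OFFSET\n\t"      \
                    ".equ __" #type "_" #member "_OFFSET, %c0"       \
                    : : "i"(offsetof(type, member)))

    void offsets_container(void)   /* GEN_ABS_SYM_BEGIN opens a similar body */
    {
            GEN_OFFSET_SYM_SKETCH(_cpu_t, nested);
            GEN_OFFSET_SYM_SKETCH(_cpu_t, irq_stack);
    }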
@@ -79,8 +79,7 @@ struct _ready_q {
 
 typedef struct _ready_q _ready_q_t;
 
-struct _kernel {
-
+struct _cpu {
 	/* nested interrupt count */
 	u32_t nested;
 
@@ -89,6 +88,30 @@ struct _kernel {
 
 	/* currently scheduled thread */
 	struct k_thread *current;
+};
+
+typedef struct _cpu _cpu_t;
+
+struct _kernel {
+	/* For compatibility with pre-SMP code, union the first CPU
+	 * record with the legacy fields so code can continue to use
+	 * the "_kernel.XXX" expressions and assembly offsets.
+	 */
+	union {
+		struct _cpu cpus[CONFIG_MP_NUM_CPUS];
+#ifndef CONFIG_SMP
+		struct {
+			/* nested interrupt count */
+			u32_t nested;
+
+			/* interrupt stack pointer base */
+			char *irq_stack;
+
+			/* currently scheduled thread */
+			struct k_thread *current;
+		};
+#endif
+	};
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	/* queue of timeouts */
@@ -131,7 +154,12 @@ typedef struct _kernel _kernel_t;
 
 extern struct _kernel _kernel;
 
+#ifdef CONFIG_SMP
+#define _current (_arch_curr_cpu()->current)
+#else
 #define _current _kernel.current
+#endif
+
 #define _ready_q _kernel.ready_q
 #define _timeout_q _kernel.timeout_q
 #define _threads _kernel.threads
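Because _current now has two expansions, code written against it is source-compatible with both configurations; restore_stack() in the second hunk is an in-tree example. A tiny hedged illustration (the function name is illustrative, not from the tree):

    /* Expands through _arch_curr_cpu()->current under CONFIG_SMP and
     * through _kernel.current otherwise; the caller cannot tell.
     */
    static inline int currently_preemptible(void)
    {
            return _is_preempt(_current);
    }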
@@ -34,7 +34,7 @@ static inline unsigned int _Swap(unsigned int key)
 	struct k_thread *new_thread, *old_thread;
 	int ret;
 
-	old_thread = _kernel.current;
+	old_thread = _current;
 
 	_check_stack_sentinel();
 	_update_time_slice_before_swap();
@@ -43,11 +43,11 @@ static inline unsigned int _Swap(unsigned int key)
 
 	old_thread->swap_retval = -EAGAIN;
 
-	_kernel.current = new_thread;
+	_current = new_thread;
 	_arch_switch(new_thread->switch_handle,
 		     &old_thread->switch_handle);
 
-	ret =_kernel.current->swap_retval;
+	ret = _current->swap_retval;
 
 	irq_unlock(key);
 
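For context on the hunk above: _arch_switch() is the asm2 context-switch primitive. As used here, it resumes the context named by its first argument, and when the old thread is eventually switched back in, a handle for it has been stored through the second pointer. A hedged caller-level view of that contract, summarized from this diff's usage rather than from the architecture code:

    /* Resumes the context identified by switch_to.  When the calling
     * thread later runs again, *switched_from holds a handle that
     * resumes it at this point.  (Semantics inferred from _Swap();
     * see the per-architecture implementation for authority.)
     */
    extern void _arch_switch(void *switch_to, void **switched_from);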