arch: arc: add user space support for arc
* add the implementation of syscall
* based on the 'trap_s' instruction, id = 3
* add the privilege stack
* the privilege stack is allocated together with the thread stack
* for a kernel thread, the privilege stack is also a part of the thread stack; the start of the stack can be configured as a stack guard
* for a user thread, there is no stack guard; when the user stack overflows, it falls into the kernel memory area, which requires kernel privilege, so a privilege violation is raised
* modify the linker template and add MPU_ADDR_ALIGN
* add the user space related code in the MPU driver
* the user SP aux reg becomes part of the thread context
* when a user thread is interrupted for the 1st time, the context is saved on the user stack (the U bit of AUX_IRQ_CTRL is set to 1); when a nested interrupt comes in, the context is saved on the thread's privilege stack
* arc_mpu_regions.c is moved to the board folder, as it is board specific
* the above code has been tested through tests/kernel/mem_protect/userspace for MPU version 2

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
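Not part of the commit itself, but a minimal sketch of how the new support is exercised from an application, using the standard Zephyr thread APIs of this period; the entry function, stack size and priority below are illustrative.

#include <zephyr.h>

/* On ARC with CONFIG_USERSPACE, K_THREAD_STACK_DEFINE now also reserves
 * the privilege stack area (CONFIG_PRIVILEGED_STACK_SIZE) behind the
 * user-visible stack buffer, as introduced by this patch.
 */
K_THREAD_STACK_DEFINE(user_stack, 1024);
static struct k_thread user_thread;

static void user_entry(void *p1, void *p2, void *p3)
{
	/* Runs with STATUS32.U set: privileged accesses fault, and kernel
	 * services are reached through the trap_s-based system calls.
	 */
	k_sleep(100);
}

void start_user_thread(void)
{
	k_thread_create(&user_thread, user_stack,
			K_THREAD_STACK_SIZEOF(user_stack),
			user_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(5), K_USER, K_NO_WAIT);
}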
parent 3637d50c25 · commit f81dee0b2b
@@ -41,6 +41,7 @@ menu "ARCv2 Family Options"
config CPU_ARCV2
	bool
	select ARCH_HAS_STACK_PROTECTION
	select ARCH_HAS_USERSPACE if ARC_CORE_MPU
	default y
	help
	  This option signifies the use of a CPU of the ARCv2 family.
@@ -22,3 +22,4 @@ zephyr_sources_ifdef(CONFIG_ARC_FIRQ fast_irq.S)
zephyr_sources_if_kconfig(irq_offload.c)
zephyr_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_CUSTOM atomic.c)
add_subdirectory_ifdef(CONFIG_CPU_HAS_MPU mpu)
zephyr_sources_ifdef(CONFIG_USERSPACE userspace.S)
@@ -257,21 +257,13 @@ _firq_reschedule:
	 */
	_load_callee_saved_regs

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	mov r0, r2
	bl configure_mpu_stack_guard
	bl configure_mpu_thread
	pop_s r2
#endif

#ifdef CONFIG_USERSPACE
	push_s r2
	mov r0, r2
	bl configure_mpu_mem_domain
	pop_s r2
#endif

	ld_s r3, [r2, _thread_offset_to_relinquish_cause]

	breq r3, _CAUSE_RIRQ, _firq_return_from_rirq
@@ -84,3 +84,10 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
	for (;;)
		;
}

FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
{
	_SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr);
	CODE_UNREACHABLE;
}
@@ -47,7 +47,7 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_d)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_prot_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_privilege_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_swi)
#ifndef CONFIG_IRQ_OFFLOAD
#if !defined(CONFIG_IRQ_OFFLOAD) && !defined(CONFIG_USERSPACE)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
#endif
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_extension)
@@ -109,8 +109,38 @@ exc_nest_handle:

#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_irq_do_offload);
#endif

#if defined(CONFIG_IRQ_OFFLOAD) || defined(CONFIG_USERSPACE)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
#ifdef CONFIG_USERSPACE
	/* get the id of trap_s */
	lr ilink, [_ARC_V2_ECR]
	and ilink, ilink, 0x3f
	cmp ilink, 0x3
	bne _do_other_trap
	/* do sys_call */
	mov ilink, _SYSCALL_LIMIT
	cmp r6, ilink
	blt valid_syscall_id

	mov r0, r6
	mov r6, _SYSCALL_BAD

valid_syscall_id:
	lr ilink, [_ARC_V2_ERET]
	push ilink
	lr ilink, [_ARC_V2_ERSTATUS]
	push ilink

	bclr ilink, ilink, _ARC_V2_STATUS32_U_BIT
	sr ilink, [_ARC_V2_ERSTATUS]

	mov ilink, _arc_do_syscall
	sr ilink, [_ARC_V2_ERET]

	rtie

/*
 * Before invoking exception handler, the kernel switches to an exception
 * stack to save the faulting thread's registers.

@@ -118,6 +148,8 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
 * a diagnostic message and halt.
 */

_do_other_trap:
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_STACK_CHECKING
	push_s r2
	/* disable stack checking */
@@ -148,7 +180,9 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
trap_nest_handle:
	push_s r0

#ifdef CONFIG_IRQ_OFFLOAD
	jl _irq_do_offload
#endif

	pop sp

@@ -200,5 +234,4 @@ _trap_check_for_swap:

	/* Assumption: r2 has current thread */
	b _rirq_common_interrupt_swap

#endif /* CONFIG_IRQ_OFFLOAD */
#endif /* CONFIG_IRQ_OFFLOAD || CONFIG_USERSPACE */
@@ -229,6 +229,13 @@ From RIRQ:
 */

SECTION_FUNC(TEXT, _isr_wrapper)
#if CONFIG_USERSPACE
	/* utilize the fact that Z bit is set if interrupt taken in U mode */
	bnz _isr_from_privilege
	/* get the correct stack pointer, don't touch _ARC_V2_USER_SP in the future */
	aex sp, [_ARC_V2_USER_SP]
_isr_from_privilege:
#endif
#if CONFIG_ARC_FIRQ
#if CONFIG_RGF_NUM_BANKS == 1
	st r0,[saved_r0]
@@ -11,6 +11,27 @@
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#include <logging/sys_log.h>

/*
 * @brief Configure MPU for the thread
 *
 * This function configures the per-thread memory map by reprogramming the MPU.
 *
 * @param thread thread info data structure.
 */
void configure_mpu_thread(struct k_thread *thread)
{
	arc_core_mpu_disable();
#if defined(CONFIG_MPU_STACK_GUARD)
	configure_mpu_stack_guard(thread);
#endif

#if defined(CONFIG_USERSPACE)
	configure_mpu_user_context(thread);
	configure_mpu_mem_domain(thread);
#endif
	arc_core_mpu_enable();
}

#if defined(CONFIG_MPU_STACK_GUARD)
/*
 * @brief Configure MPU stack guard
@@ -22,15 +43,44 @@
 */
void configure_mpu_stack_guard(struct k_thread *thread)
{
	arc_core_mpu_disable();
#if defined(CONFIG_USERSPACE)
	if (!thread->arch.priv_stack_start) {
		/* The areas before and after the thread's user stack are
		 * kernel-only. These areas can be used as the stack guard.
		 * -----------------------
		 * | kernel only access  |
		 * |---------------------|
		 * | user stack          |
		 * |---------------------|
		 * | privilege stack     |
		 * -----------------------
		 */
		return;
	}
#endif
	arc_core_mpu_configure(THREAD_STACK_GUARD_REGION,
			       thread->stack_info.start - STACK_GUARD_SIZE,
			       STACK_GUARD_SIZE);
	arc_core_mpu_enable();

}
#endif

#if defined(CONFIG_USERSPACE)
/*
 * @brief Configure MPU user context
 *
 * This function configures the thread's user context.
 * The functionality is meant to be used during context switch.
 *
 * @param thread thread info data structure.
 */
void configure_mpu_user_context(struct k_thread *thread)
{
	SYS_LOG_DBG("configure user thread %p's context", thread);
	arc_core_mpu_configure_user_context(thread);
}


/*
 * @brief Configure MPU memory domain
 *
@@ -42,9 +92,7 @@ void configure_mpu_stack_guard(struct k_thread *thread)
void configure_mpu_mem_domain(struct k_thread *thread)
{
	SYS_LOG_DBG("configure thread %p's domain", thread);
	arc_core_mpu_disable();
	arc_core_mpu_configure_mem_domain(thread->mem_domain_info.mem_domain);
	arc_core_mpu_enable();
}

int _arch_mem_domain_max_partitions_get(void)
@@ -11,6 +11,7 @@
#include <arch/arc/v2/aux_regs.h>
#include <arch/arc/v2/mpu/arc_mpu.h>
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#include <linker/linker-defs.h>
#include <logging/sys_log.h>


@@ -66,8 +67,12 @@ static inline u8_t _get_num_regions(void)
static inline u32_t _get_region_attr_by_type(u32_t type)
{
	switch (type) {
	case THREAD_STACK_USER_REGION:
		return REGION_RAM_ATTR;
	case THREAD_STACK_REGION:
		return 0;
		return AUX_MPU_RDP_KW | AUX_MPU_RDP_KR;
	case THREAD_APP_DATA_REGION:
		return REGION_RAM_ATTR;
	case THREAD_STACK_GUARD_REGION:
		/* no Write and Execute to guard region */
		return AUX_MPU_RDP_UR | AUX_MPU_RDP_KR;
@@ -161,8 +166,11 @@ static inline u32_t _get_region_index_by_type(u32_t type)
	 */
	switch (type) {
#if CONFIG_ARC_MPU_VER == 2
	case THREAD_STACK_USER_REGION:
		return _get_num_regions() - mpu_config.num_regions
		       - THREAD_STACK_REGION;
	case THREAD_STACK_REGION:
		return _get_num_regions() - mpu_config.num_regions - type;
	case THREAD_APP_DATA_REGION:
	case THREAD_STACK_GUARD_REGION:
		return _get_num_regions() - mpu_config.num_regions - type;
	case THREAD_DOMAIN_PARTITION_REGION:
@@ -176,8 +184,10 @@ static inline u32_t _get_region_index_by_type(u32_t type)
		return _get_num_regions() - mpu_config.num_regions - type + 1;
#endif
#elif CONFIG_ARC_MPU_VER == 3
	case THREAD_STACK_USER_REGION:
		return mpu_config.num_regions + THREAD_STACK_REGION - 1;
	case THREAD_STACK_REGION:
		return mpu_config.num_regions + type - 1;
	case THREAD_APP_DATA_REGION:
	case THREAD_STACK_GUARD_REGION:
		return mpu_config.num_regions + type - 1;
	case THREAD_DOMAIN_PARTITION_REGION:
@@ -417,6 +427,37 @@ void arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
}

#if defined(CONFIG_USERSPACE)
void arc_core_mpu_configure_user_context(struct k_thread *thread)
{
	u32_t base = (u32_t)thread->stack_obj;
	u32_t size = thread->stack_info.size;

	/* for kernel threads, no need to configure the user context */
	if (!thread->arch.priv_stack_start) {
		return;
	}

	arc_core_mpu_configure(THREAD_STACK_USER_REGION, base, size);

	/* configure app data portion */
#ifdef CONFIG_APPLICATION_MEMORY
#if CONFIG_ARC_MPU_VER == 2
	base = (u32_t)&__app_ram_start;
	size = (u32_t)&__app_ram_end - (u32_t)&__app_ram_start;

	/* set up the app data region if it exists, otherwise disable */
	if (size > 0) {
		arc_core_mpu_configure(THREAD_APP_DATA_REGION, base, size);
	}
#elif CONFIG_ARC_MPU_VER == 3
	/*
	 * ARC MPU v3 doesn't support MPU region overlap.
	 * Application memory should be a static region defined in mpu_config.
	 */
#endif
#endif
}

/**
 * @brief configure MPU regions for the memory partitions of the memory domain
 *
@@ -79,6 +79,9 @@ GEN_OFFSET_SYM(_callee_saved_stack_t, r24);
GEN_OFFSET_SYM(_callee_saved_stack_t, r25);
GEN_OFFSET_SYM(_callee_saved_stack_t, r26);
GEN_OFFSET_SYM(_callee_saved_stack_t, fp);
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_callee_saved_stack_t, user_sp);
#endif
GEN_OFFSET_SYM(_callee_saved_stack_t, r30);
#ifdef CONFIG_FP_SHARING
GEN_OFFSET_SYM(_callee_saved_stack_t, r58);
@@ -161,17 +161,10 @@ _rirq_common_interrupt_swap:
	 */
	_load_callee_saved_regs

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	mov r0, r2
	bl configure_mpu_stack_guard
	pop_s r2
#endif

#ifdef CONFIG_USERSPACE
	push_s r2
	mov r0, r2
	bl configure_mpu_mem_domain
	bl configure_mpu_thread
	pop_s r2
#endif

@@ -110,17 +110,10 @@ SECTION_FUNC(TEXT, __swap)

	_load_callee_saved_regs

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	mov r0, r2
	bl configure_mpu_stack_guard
	pop_s r2
#endif

#ifdef CONFIG_USERSPACE
	push_s r2
	mov r0, r2
	bl configure_mpu_mem_domain
	bl configure_mpu_thread
	pop_s r2
#endif

@@ -64,14 +64,41 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,

	char *stackEnd = pStackMem + stackSize;
	struct init_stack_frame *pInitCtx;

#if CONFIG_USERSPACE
	/* for kernel threads, the privilege stack is merged into the thread stack */
	if (!(options & K_USER)) {
		/* if MPU_STACK_GUARD is enabled, reserve the stack guard area
		 * |---------------------|----------------|
		 * |                     |(MPU STACK AREA)|
		 * |                     |----------------|
		 * |     user stack      | kernel thread  |
		 * |---------------------|     stack      |
		 * |   privilege stack   |                |
		 * ----------------------------------------
		 */
#ifdef CONFIG_MPU_STACK_GUARD
		pStackMem += STACK_GUARD_SIZE;
		stackSize = stackSize + CONFIG_PRIVILEGED_STACK_SIZE
			    - STACK_GUARD_SIZE;
#endif
		stackEnd += CONFIG_PRIVILEGED_STACK_SIZE;
		stackSize += CONFIG_PRIVILEGED_STACK_SIZE;
	}
#endif
	_new_thread_init(thread, pStackMem, stackSize, priority, options);

	/* carve the thread entry struct from the "base" of the stack */
	pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd) -
					       sizeof(struct init_stack_frame));

#if CONFIG_USERSPACE
	if (options & K_USER) {
		pInitCtx->pc = ((u32_t)_arch_user_mode_enter);
	} else {
		pInitCtx->pc = ((u32_t)_thread_entry_wrapper);
	}
#else
	pInitCtx->pc = ((u32_t)_thread_entry_wrapper);
#endif
	pInitCtx->r0 = (u32_t)pEntry;
	pInitCtx->r1 = (u32_t)parameter1;
	pInitCtx->r2 = (u32_t)parameter2;
@@ -84,12 +111,24 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
	 * value.
	 */
#ifdef CONFIG_ARC_STACK_CHECKING
	pInitCtx->status32 = _ARC_V2_STATUS32_SC | _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
	pInitCtx->status32 = _ARC_V2_STATUS32_SC |
			     _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
	thread->arch.stack_base = (u32_t) stackEnd;
#else
	pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
#endif

#if CONFIG_USERSPACE
	if (options & K_USER) {
		thread->arch.priv_stack_start = (u32_t) stackEnd;
		thread->arch.priv_stack_size =
			(u32_t)CONFIG_PRIVILEGED_STACK_SIZE;
	} else {
		thread->arch.priv_stack_start = 0;
		thread->arch.priv_stack_size = 0;
	}
#endif

#ifdef CONFIG_THREAD_MONITOR
	/*
	 * In debug mode thread->entry give direct access to the thread entry
@@ -113,3 +152,17 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,

	thread_monitor_init(thread);
}


#ifdef CONFIG_USERSPACE

FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
					 void *p1, void *p2, void *p3)
{
	_arc_userspace_enter(user_entry, p1, p2, p3,
			     (u32_t)_current->stack_obj,
			     _current->stack_info.size);
	CODE_UNREACHABLE;
}

#endif
@@ -15,7 +15,6 @@
#include <linker/sections.h>

GTEXT(_thread_entry_wrapper)
GTEXT(_thread_entry)

/*
 * @brief Wrapper for _thread_entry
arch/arc/core/userspace.S (new file, 118 lines)

@@ -0,0 +1,118 @@
/*
 * Copyright (c) 2017 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <offsets_short.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <kernel_structs.h>
#include <arch/cpu.h>
#include <syscall.h>

GTEXT(_arc_userspace_enter)
GTEXT(_arc_do_syscall)
GTEXT(_arch_is_user_context)


/**
 *
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 *
 */
SECTION_FUNC(TEXT, _arc_userspace_enter)
	/*
	 * In ARCv2, the U bit can only be set through exception return
	 */
	/* the end of user stack in r5 */
	add r5, r4, r5
	/* start of privilege stack */
	add r2, r5, CONFIG_PRIVILEGED_STACK_SIZE
	sub r5, r5, 16 /* skip r0, r1, r2, r3 */

#ifdef CONFIG_INIT_STACKS
	mov r0, 0xaaaaaaaa
#else
	mov r0, 0x0
#endif
_clear_user_stack:
	st.ab r0, [r4, 4]
	cmp r4, r5
	jlt _clear_user_stack

	lr r0, [_ARC_V2_STATUS32]
	bset r0, r0, _ARC_V2_STATUS32_U_BIT

	mov r1, _thread_entry_wrapper

	/* fake exception return */
	kflag _ARC_V2_STATUS32_AE

	sr r0, [_ARC_V2_ERSTATUS]
	sr r1, [_ARC_V2_ERET]

	/* when the exception returns from kernel to user, sp and _ARC_V2_USER_SP
	 * will be switched
	 */
	sr r5, [_ARC_V2_USER_SP]
	mov sp, r2

	rtie

/**
 *
 * Userspace system call function
 *
 * This function is used to do system calls from unprivileged code. This
 * function is responsible for the following:
 * 1) Dispatching the system call
 * 2) Restoring the stack and calling back to the caller of the system call
 *
 */
SECTION_FUNC(TEXT, _arc_do_syscall)
	/* r0-r5: arg1-arg6, r6 is the call id */
	/* the call id is already checked in the trap_s handler */
	push_s blink
	push_s r0

	mov r0, _k_syscall_table
	ld.as blink, [r0, r6]

	pop_s r0

	jl [blink]


	pop_s blink


	/* through a fake exception return, go back to the caller */
	kflag _ARC_V2_STATUS32_AE

	/* the status and return address are saved in the trap_s handler */
	pop r6
	sr r6, [_ARC_V2_ERSTATUS]
	pop r6
	sr r6, [_ARC_V2_ERET]
	rtie

SECTION_FUNC(TEXT, _arch_is_user_context)
	lr r0, [_ARC_V2_STATUS32]
	bbit1 r0, 20, 1f
	bset r1, r0, 20
	bclr r1, r1, 31
	kflag r1
	lr r1, [_ARC_V2_STATUS32]
	bbit0 r1, 20, 2f
	kflag r0
1:
	j_s.d [blink]
	mov r0, 0
2:
	j_s.d [blink]
	mov r0, 1
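For readability, here is a C-level sketch (not part of the patch) of what the trap_s handler and _arc_do_syscall above implement together; the helper name and handler typedef are illustrative, while _SYSCALL_LIMIT, _SYSCALL_BAD and _k_syscall_table come from Zephyr's generated syscall machinery.

#include <zephyr/types.h>

/* Illustrative handler type; arguments arrive in r0-r5, the call id in r6. */
typedef u32_t (*syscall_handler_t)(u32_t a1, u32_t a2, u32_t a3,
				   u32_t a4, u32_t a5, u32_t a6);
extern const syscall_handler_t _k_syscall_table[];

static u32_t do_syscall_sketch(u32_t a1, u32_t a2, u32_t a3, u32_t a4,
			       u32_t a5, u32_t a6, u32_t call_id)
{
	/* Done in the __ev_trap handler: an out-of-range id is passed on as
	 * the first argument and the call is redirected to _SYSCALL_BAD.
	 */
	if (call_id >= _SYSCALL_LIMIT) {
		a1 = call_id;
		call_id = _SYSCALL_BAD;
	}

	/* Done in _arc_do_syscall, after the faked exception return has put
	 * the thread back in kernel mode: index the table, call the handler,
	 * and return its result in r0.
	 */
	return _k_syscall_table[call_id](a1, a2, a3, a4, a5, a6);
}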
@@ -129,6 +129,10 @@ struct _callee_saved_stack {
	u32_t r25;
	u32_t r26;
	u32_t fp; /* r27 */

#ifdef CONFIG_USERSPACE
	u32_t user_sp;
#endif
	/* r28 is the stack pointer and saved separately */
	/* r29 is ILINK and does not need to be saved */
	u32_t r30;
@@ -78,6 +78,9 @@ static inline void _IntLibInit(void)
	/* nothing needed, here because the kernel requires it */
}

extern void _arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
				 void *p2, void *p3, u32_t stack, u32_t size);

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
@@ -63,6 +63,11 @@ struct _thread_arch {
	 */
	u32_t stack_base;
#endif

#ifdef CONFIG_USERSPACE
	u32_t priv_stack_start;
	u32_t priv_stack_size;
#endif
};

typedef struct _thread_arch _thread_arch_t;
@@ -41,6 +41,11 @@ extern "C" {
	st r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
	st r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
	st fp, [sp, ___callee_saved_stack_t_fp_OFFSET]

#ifdef CONFIG_USERSPACE
	lr r13, [_ARC_V2_USER_SP]
	st r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
	st r30, [sp, ___callee_saved_stack_t_r30_OFFSET]

#ifdef CONFIG_FP_SHARING
@@ -93,6 +98,11 @@ extern "C" {
	sr r13, [_ARC_V2_FPU_DPFP2H]
#endif

#endif

#ifdef CONFIG_USERSPACE
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
#endif

	ld_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
@@ -21,6 +21,7 @@ extern "C" {

#define _ARC_V2_AUX_IRQ_CTRL_BLINK (1 << 9)
#define _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS (1 << 10)
#define _ARC_V2_AUX_IRQ_CTRL_U (1 << 11)
#define _ARC_V2_AUX_IRQ_CTRL_LP (1 << 13)
#define _ARC_V2_AUX_IRQ_CTRL_14_REGS 7
#define _ARC_V2_AUX_IRQ_CTRL_16_REGS 8
@@ -45,6 +46,9 @@ static ALWAYS_INLINE void _irq_setup(void)
		_ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */
#ifdef CONFIG_CODE_DENSITY
		_ARC_V2_AUX_IRQ_CTRL_LP | /* save code density registers */
#endif
#ifdef CONFIG_USERSPACE
		_ARC_V2_AUX_IRQ_CTRL_U | /* save context into user stack */
#endif
		_ARC_V2_AUX_IRQ_CTRL_BLINK | /* save blink */
		_ARC_V2_AUX_IRQ_CTRL_14_REGS /* save r0 -> r13 (caller-saved) */
@@ -12,5 +12,3 @@ zephyr_sources(
	soc.c
	soc_config.c
)

zephyr_sources_ifdef(CONFIG_ARC_MPU_ENABLE arc_mpu_regions.c)
@@ -1,42 +0,0 @@
/*
 * Copyright (c) 2017 Synopsys
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <soc.h>
#include <arch/arc/v2/mpu/arc_mpu.h>

static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ICCM_SIZE > 0
	/* Region ICCM */
	MPU_REGION_ENTRY("ICCM",
			 CONFIG_ICCM_BASE_ADDRESS,
			 CONFIG_ICCM_SIZE * 1024,
			 REGION_FLASH_ATTR),
#endif
#if CONFIG_DCCM_SIZE > 0
	/* Region DCCM */
	MPU_REGION_ENTRY("DCCM",
			 CONFIG_DCCM_BASE_ADDRESS,
			 CONFIG_DCCM_SIZE * 1024,
			 REGION_RAM_ATTR),
#endif
#if CONFIG_SRAM_SIZE > 0
	/* Region DDR RAM */
	MPU_REGION_ENTRY("DDR RAM",
			 CONFIG_SRAM_BASE_ADDRESS,
			 CONFIG_SRAM_SIZE * 1024,
			 REGION_ALL_ATTR),
#endif
	/* Region Peripheral */
	MPU_REGION_ENTRY("PERIPHERAL",
			 0xF0000000,
			 64 * 1024,
			 REGION_IO_ATTR),
};

struct arc_mpu_config mpu_config = {
	.num_regions = ARRAY_SIZE(mpu_regions),
	.mpu_regions = mpu_regions,
};
boards/arc/em_starterkit/CMakeLists.txt (new file, 1 line)

@@ -0,0 +1 @@
zephyr_sources_ifdef(CONFIG_ARC_MPU_ENABLE arc_mpu_regions.c)
boards/arc/em_starterkit/arc_mpu_regions.c (new file, 92 lines)

@@ -0,0 +1,92 @@
/*
 * Copyright (c) 2017 Synopsys
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <soc.h>
#include <arch/arc/v2/mpu/arc_mpu.h>
#include <linker/linker-defs.h>

#ifdef CONFIG_USERSPACE
static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ARC_MPU_VER == 3 && defined(CONFIG_APPLICATION_MEMORY)
	/* Region ICCM */
	MPU_REGION_ENTRY("IMAGE ROM",
			 _image_rom_start,
			 _image_rom_end,
			 REGION_FLASH_ATTR),
	MPU_REGION_ENTRY("APP MEMORY",
			 __app_ram_start,
			 __app_ram_size,
			 REGION_RAM_ATTR),
	MPU_REGION_ENTRY("KERNEL MEMORY",
			 __kernel_ram_start,
			 __kernel_ram_size,
			 AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),

#else
#if CONFIG_ICCM_SIZE > 0
	/* Region ICCM */
	MPU_REGION_ENTRY("ICCM",
			 CONFIG_ICCM_BASE_ADDRESS,
			 CONFIG_ICCM_SIZE * 1024,
			 REGION_FLASH_ATTR),
#endif
#if CONFIG_DCCM_SIZE > 0
	/* Region DCCM */
	MPU_REGION_ENTRY("DCCM",
			 CONFIG_DCCM_BASE_ADDRESS,
			 CONFIG_DCCM_SIZE * 1024,
			 AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
#endif
#if CONFIG_SRAM_SIZE > 0
	/* Region DDR RAM */
	MPU_REGION_ENTRY("DDR RAM",
			 CONFIG_SRAM_BASE_ADDRESS,
			 CONFIG_SRAM_SIZE * 1024,
			 AUX_MPU_RDP_KW | AUX_MPU_RDP_KR |
			 AUX_MPU_RDP_KE | AUX_MPU_RDP_UE),
#endif
#endif /* ARC_MPU_VER == 3 */
	/* Region Peripheral */
	MPU_REGION_ENTRY("PERIPHERAL",
			 0xF0000000,
			 64 * 1024,
			 AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
};
#else
static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ICCM_SIZE > 0
	/* Region ICCM */
	MPU_REGION_ENTRY("ICCM",
			 CONFIG_ICCM_BASE_ADDRESS,
			 CONFIG_ICCM_SIZE * 1024,
			 REGION_FLASH_ATTR),
#endif
#if CONFIG_DCCM_SIZE > 0
	/* Region DCCM */
	MPU_REGION_ENTRY("DCCM",
			 CONFIG_DCCM_BASE_ADDRESS,
			 CONFIG_DCCM_SIZE * 1024,
			 REGION_RAM_ATTR),
#endif
#if CONFIG_SRAM_SIZE > 0
	/* Region DDR RAM */
	MPU_REGION_ENTRY("DDR RAM",
			 CONFIG_SRAM_BASE_ADDRESS,
			 CONFIG_SRAM_SIZE * 1024,
			 REGION_ALL_ATTR),
#endif
	/* Region Peripheral */
	MPU_REGION_ENTRY("PERIPHERAL",
			 0xF0000000,
			 64 * 1024,
			 REGION_IO_ATTR),
};
#endif

struct arc_mpu_config mpu_config = {
	.num_regions = ARRAY_SIZE(mpu_regions),
	.mpu_regions = mpu_regions,
};
@@ -39,7 +39,7 @@ extern "C" {
#include <arch/arc/v2/addr_types.h>
#endif

#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
#if defined(CONFIG_ARC_CORE_MPU)
#if CONFIG_ARC_MPU_VER == 2
/*
@@ -62,22 +62,93 @@ extern "C" {
#define STACK_GUARD_SIZE 0
#endif

#define STACK_SIZE_ALIGN(x) max(STACK_ALIGN, x)



/**
 * @brief Calculate power of two ceiling for a buffer size input
 *
 */
#define POW2_CEIL(x) ((1 << (31 - __builtin_clz(x))) < x ? \
		1 << (31 - __builtin_clz(x) + 1) : \
		1 << (31 - __builtin_clz(x)))

#if defined(CONFIG_USERSPACE)

/*
 * If user space is enabled, a user thread has no STACK_GUARD area;
 * for a kernel thread, the privilege stack can be used as the STACK_GUARD
 * area, so the privilege stack size must be greater than STACK_GUARD_SIZE.
 */
#if defined(CONFIG_MPU_STACK_GUARD) && \
	CONFIG_PRIVILEGED_STACK_SIZE < STACK_GUARD_SIZE
#error "CONFIG_PRIVILEGED_STACK_SIZE must be larger than STACK_GUARD_SIZE"
#endif


#if CONFIG_ARC_MPU_VER == 2

#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
		sym[size+STACK_GUARD_SIZE]
	struct _k_thread_stack_element __kernel_noinit \
		__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
		sym[POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
		CONFIG_PRIVILEGED_STACK_SIZE]

#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
		sym[nmemb][size+STACK_GUARD_SIZE]
	struct _k_thread_stack_element __kernel_noinit \
		__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
		sym[nmemb][POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
		CONFIG_PRIVILEGED_STACK_SIZE]

#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct _k_thread_stack_element \
		__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
		sym[POW2_CEIL(size) + CONFIG_PRIVILEGED_STACK_SIZE]

#elif CONFIG_ARC_MPU_VER == 3

#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
		sym[size + CONFIG_PRIVILEGED_STACK_SIZE]

#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
		sym[nmemb][size + CONFIG_PRIVILEGED_STACK_SIZE]

#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
		sym[size+STACK_GUARD_SIZE]
		sym[size + CONFIG_PRIVILEGED_STACK_SIZE]

#endif /* CONFIG_ARC_MPU_VER */

#define _ARCH_THREAD_STACK_SIZEOF(sym) \
	(sizeof(sym) - CONFIG_PRIVILEGED_STACK_SIZE)

#define _ARCH_THREAD_STACK_BUFFER(sym) \
	((char *)(sym))

#else /* CONFIG_USERSPACE */

#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
		sym[size + STACK_GUARD_SIZE]

#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
		sym[nmemb][size + STACK_GUARD_SIZE]

#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
		sym[size + STACK_GUARD_SIZE]

#define _ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - STACK_GUARD_SIZE)

#define _ARCH_THREAD_STACK_BUFFER(sym) ((char *)(sym + STACK_GUARD_SIZE))

#endif /* CONFIG_USERSPACE */


#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_MPU
#ifndef _ASMLANGUAGE

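A worked example (not from the patch) of what the MPU v2 variants of these macros produce; MPU v2 requires a region's base to be aligned to its size, hence the power-of-two rounding. CONFIG_PRIVILEGED_STACK_SIZE = 256 is only an assumed value here.

/* Requesting a 1000-byte user stack on ARC MPU v2:
 *
 *   POW2_CEIL(STACK_SIZE_ALIGN(1000)) = POW2_CEIL(1000) = 1024
 *
 * so the definition below reserves 1024 + 256 bytes, aligned to 1024.
 * _ARCH_THREAD_STACK_SIZEOF(my_stack) then reports sizeof(my_stack) - 256
 * = 1024, i.e. only the user-visible part; the trailing 256 bytes are the
 * privilege stack carved off in _new_thread().
 */
K_THREAD_STACK_DEFINE(my_stack, 1000);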
@@ -158,46 +229,125 @@ static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
					  u32_t arg4, u32_t arg5, u32_t arg6,
					  u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;
	register u32_t r2 __asm__("r2") = arg3;
	register u32_t r3 __asm__("r3") = arg4;
	register u32_t r4 __asm__("r4") = arg5;
	register u32_t r5 __asm__("r5") = arg6;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
		  "r" (r4), "r" (r5), "r" (r6));

	return ret;
}

static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
					  u32_t arg4, u32_t arg5, u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;
	register u32_t r2 __asm__("r2") = arg3;
	register u32_t r3 __asm__("r3") = arg4;
	register u32_t r4 __asm__("r4") = arg5;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
		  "r" (r4), "r" (r6));

	return ret;
}

static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
					  u32_t arg4, u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;
	register u32_t r2 __asm__("r2") = arg3;
	register u32_t r3 __asm__("r3") = arg4;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r3),
		  "r" (r6));

	return ret;
}

static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
					  u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;
	register u32_t r2 __asm__("r2") = arg3;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r2), "r" (r6));

	return ret;
}

static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r1 __asm__("r1") = arg2;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r1), "r" (r6));

	return ret;
}

static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0") = arg1;
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r6));

	return ret;
}

static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
	return 0;
	register u32_t ret __asm__("r0");
	register u32_t r6 __asm__("r6") = call_id;

	__asm__ volatile(
		"trap_s %[trap_s_id]\n"
		: "=r"(ret)
		: [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL),
		  "r" (ret), "r" (r6));

	return ret;
}

static inline int _arch_is_user_context(void)
{
	return 0;
}
extern int _arch_is_user_context(void);

#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#ifdef __cplusplus

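As a usage sketch (not part of the patch): a hand-written wrapper would funnel its arguments and call id through these helpers; in the real tree such stubs are generated, and the syscall id below is hypothetical.

/* Hypothetical id; real values come from the generated syscall list. */
#define K_SYSCALL_SAMPLE_WRITE 42

static inline int sample_write(void *buf, u32_t len)
{
	/* arg1/arg2 land in r0/r1, the id in r6, then "trap_s 3"
	 * (_TRAP_S_CALL_SYSTEM_CALL) enters the kernel; the result
	 * comes back in r0.
	 */
	return (int)_arch_syscall_invoke2((u32_t)buf, len,
					  K_SYSCALL_SAMPLE_WRITE);
}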
@@ -25,6 +25,7 @@ extern "C" {
#define _ARC_V2_SEC_STAT 0x09
#define _ARC_V2_STATUS32 0x00a
#define _ARC_V2_STATUS32_P0 0x00b
#define _ARC_V2_USER_SP 0x00d
#define _ARC_V2_AUX_IRQ_CTRL 0x00e
#define _ARC_V2_IC_IVIC 0x010
#define _ARC_V2_IC_CTRL 0x011
@@ -102,7 +103,8 @@ extern "C" {
#define _ARC_V2_STATUS32_AE_BIT 5
#define _ARC_V2_STATUS32_AE (1 << _ARC_V2_STATUS32_AE_BIT)
#define _ARC_V2_STATUS32_DE (1 << 6)
#define _ARC_V2_STATUS32_U (1 << 7)
#define _ARC_V2_STATUS32_U_BIT 7
#define _ARC_V2_STATUS32_U (1 << _ARC_V2_STATUS32_U_BIT)
#define _ARC_V2_STATUS32_V (1 << 8)
#define _ARC_V2_STATUS32_C (1 << 9)
#define _ARC_V2_STATUS32_N (1 << 10)
@@ -33,6 +33,11 @@ extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
#define _NANO_ERR_KERNEL_OOPS (4) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */


#define _TRAP_S_SCALL_IRQ_OFFLOAD 1
#define _TRAP_S_CALL_RUNTIME_EXCEPT 2
#define _TRAP_S_CALL_SYSTEM_CALL 3

#ifdef __cplusplus
}
#endif
@@ -37,6 +37,16 @@
#endif
#endif

#ifdef CONFIG_ARC_MPU_ENABLE
#if CONFIG_ARC_MPU_VER == 2
#define MPU_ADDR_ALIGN . = ALIGN(2048);
#elif CONFIG_ARC_MPU_VER == 3
#define MPU_ADDR_ALIGN . = ALIGN(32);
#endif
#else
#define MPU_ADDR_ALIGN
#endif

#if defined(CONFIG_XIP)
#define _DATA_IN_ROM __data_rom_start
#else
@@ -108,7 +118,9 @@ SECTIONS {
	} GROUP_LINK_IN(ROMABLE_REGION)

	_image_rodata_end = .;
	MPU_ADDR_ALIGN
	_image_rom_end = .;
	_image_rom_size = _image_rom_end - _image_rom_start;

	GROUP_END(ROMABLE_REGION)

@@ -117,6 +129,7 @@ SECTIONS {
#ifdef CONFIG_APPLICATION_MEMORY
	SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),)
	{
		MPU_ADDR_ALIGN
		__app_ram_start = .;
		__app_data_ram_start = .;
		_image_ram_start = .;
@@ -142,12 +155,19 @@ SECTIONS {
	{
		APP_INPUT_SECTION(.noinit)
		APP_INPUT_SECTION(".noinit.*")
		/*
		 * For MPU v2, the application memory section must be aligned
		 * to its own size.
		 */
		MPU_ADDR_ALIGN
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)

	__app_ram_end = .;
	__app_ram_size = __app_ram_end - __app_ram_start;
#endif /* CONFIG_APPLICATION_MEMORY */

	SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),) {
		MPU_ADDR_ALIGN
		/*
		 * For performance, BSS section is assumed to be 4 byte aligned and
		 * a multiple of 4 bytes
@@ -204,6 +224,8 @@ SECTIONS {

	__data_ram_end = .;

	MPU_ADDR_ALIGN

	/* Define linker symbols */
	_image_ram_end = .;
	_end = .; /* end of image */
@@ -28,9 +28,11 @@ extern "C" {
 * be managed inside the MPU driver and not escalated.
 */
/* Thread Region Intent Type */
#define THREAD_STACK_USER_REGION 0x0
#define THREAD_STACK_REGION 0x1
#define THREAD_STACK_GUARD_REGION 0x2
#define THREAD_DOMAIN_PARTITION_REGION 0x3
#define THREAD_APP_DATA_REGION 0x2
#define THREAD_STACK_GUARD_REGION 0x3
#define THREAD_DOMAIN_PARTITION_REGION 0x4

#if defined(CONFIG_ARC_CORE_MPU)
/* ARC Core MPU Driver API */
@@ -95,7 +97,7 @@ void configure_mpu_stack_guard(struct k_thread *thread);
#endif

#if defined(CONFIG_USERSPACE)

void arc_core_mpu_configure_user_context(struct k_thread *thread);
void arc_core_mpu_configure_mem_domain(struct k_mem_domain *mem_domain);
void arc_core_mpu_mem_partition_remove(u32_t part_index);
void arc_core_mpu_configure_mem_partition(u32_t part_index,
@@ -112,8 +114,11 @@ int arc_core_mpu_buffer_validate(void *addr, size_t size, int write);
 * @param thread thread info data structure.
 */
void configure_mpu_mem_domain(struct k_thread *thread);

void configure_mpu_user_context(struct k_thread *thread);
#endif

void configure_mpu_thread(struct k_thread *thread);

#ifdef __cplusplus
}