zephyr: replace zephyr integer types with C99 types

git grep -l 'u\(8\|16\|32\|64\)_t' | \
	xargs sed -i "s/u\(8\|16\|32\|64\)_t/uint\1_t/g"
git grep -l 's\(8\|16\|32\|64\)_t' | \
	xargs sed -i "s/s\(8\|16\|32\|64\)_t/int\1_t/g"

Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
Authored by Kumar Gala on 2020-05-27 11:26:57 -05:00, committed by Kumar Gala
parent ee6fa31af6
commit a1b77fd589
2364 changed files with 32505 additions and 32505 deletions
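
For context: the legacy Zephyr names removed below were one-to-one typedefs of the C99 fixed-width types, so a purely textual sed pass like the one in the commit message is behavior-preserving. A minimal sketch of the old aliases (paraphrased from what zephyr/types.h provided at the time, not copied verbatim):

	/* Sketch of the legacy aliases (assumption: paraphrased, not verbatim
	 * source): each Zephyr type was a plain typedef of the matching
	 * <stdint.h> type, so replacing u32_t with uint32_t (and so on)
	 * changes spelling only, never semantics or ABI.
	 */
	#include <stdint.h>

	typedef uint8_t  u8_t;
	typedef uint16_t u16_t;
	typedef uint32_t u32_t;
	typedef uint64_t u64_t;

	typedef int8_t  s8_t;
	typedef int16_t s16_t;
	typedef int32_t s32_t;
	typedef int64_t s64_t;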

View file

@@ -24,7 +24,7 @@ static struct k_spinlock arc_connect_spinlock;
 /* Generate an inter-core interrupt to the target core */
-void z_arc_connect_ici_generate(u32_t core)
+void z_arc_connect_ici_generate(uint32_t core)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_IRQ, core);
@@ -32,7 +32,7 @@ void z_arc_connect_ici_generate(u32_t core)
 }
 /* Acknowledge the inter-core interrupt raised by core */
-void z_arc_connect_ici_ack(u32_t core)
+void z_arc_connect_ici_ack(uint32_t core)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_GENERATE_ACK, core);
@@ -40,9 +40,9 @@ void z_arc_connect_ici_ack(u32_t core)
 }
 /* Read inter-core interrupt status */
-u32_t z_arc_connect_ici_read_status(u32_t core)
+uint32_t z_arc_connect_ici_read_status(uint32_t core)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_READ_STATUS, core);
@@ -53,9 +53,9 @@ u32_t z_arc_connect_ici_read_status(u32_t core)
 }
 /* Check the source of inter-core interrupt */
-u32_t z_arc_connect_ici_check_src(void)
+uint32_t z_arc_connect_ici_check_src(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_INTRPT_CHECK_SOURCE, 0);
@@ -68,7 +68,7 @@ u32_t z_arc_connect_ici_check_src(void)
 /* Clear the inter-core interrupt */
 void z_arc_connect_ici_clear(void)
 {
-	u32_t cpu, c;
+	uint32_t cpu, c;
 	LOCKED(&arc_connect_spinlock) {
@@ -89,7 +89,7 @@ void z_arc_connect_ici_clear(void)
 }
 /* Reset the cores in core_mask */
-void z_arc_connect_debug_reset(u32_t core_mask)
+void z_arc_connect_debug_reset(uint32_t core_mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_RESET,
@@ -98,7 +98,7 @@ void z_arc_connect_debug_reset(u32_t core_mask)
 }
 /* Halt the cores in core_mask */
-void z_arc_connect_debug_halt(u32_t core_mask)
+void z_arc_connect_debug_halt(uint32_t core_mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_HALT,
@@ -107,7 +107,7 @@ void z_arc_connect_debug_halt(u32_t core_mask)
 }
 /* Run the cores in core_mask */
-void z_arc_connect_debug_run(u32_t core_mask)
+void z_arc_connect_debug_run(uint32_t core_mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_RUN,
@@ -116,7 +116,7 @@ void z_arc_connect_debug_run(u32_t core_mask)
 }
 /* Set core mask */
-void z_arc_connect_debug_mask_set(u32_t core_mask, u32_t mask)
+void z_arc_connect_debug_mask_set(uint32_t core_mask, uint32_t mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_SET_MASK,
@@ -125,9 +125,9 @@ void z_arc_connect_debug_mask_set(u32_t core_mask, u32_t mask)
 }
 /* Read core mask */
-u32_t z_arc_connect_debug_mask_read(u32_t core_mask)
+uint32_t z_arc_connect_debug_mask_read(uint32_t core_mask)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_READ_MASK,
@@ -141,7 +141,7 @@ u32_t z_arc_connect_debug_mask_read(u32_t core_mask)
 /*
  * Select cores that should be halted if the core issuing the command is halted
  */
-void z_arc_connect_debug_select_set(u32_t core_mask)
+void z_arc_connect_debug_select_set(uint32_t core_mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_DEBUG_SET_SELECT,
@@ -150,9 +150,9 @@ void z_arc_connect_debug_select_set(u32_t core_mask)
 }
 /* Read the select value */
-u32_t z_arc_connect_debug_select_read(void)
+uint32_t z_arc_connect_debug_select_read(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_SELECT, 0);
@@ -163,9 +163,9 @@ u32_t z_arc_connect_debug_select_read(void)
 }
 /* Read the status, halt or run of all cores in the system */
-u32_t z_arc_connect_debug_en_read(void)
+uint32_t z_arc_connect_debug_en_read(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_EN, 0);
@@ -176,9 +176,9 @@ u32_t z_arc_connect_debug_en_read(void)
 }
 /* Read the last command sent */
-u32_t z_arc_connect_debug_cmd_read(void)
+uint32_t z_arc_connect_debug_cmd_read(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_CMD, 0);
@@ -189,9 +189,9 @@ u32_t z_arc_connect_debug_cmd_read(void)
 }
 /* Read the value of internal MCD_CORE register */
-u32_t z_arc_connect_debug_core_read(void)
+uint32_t z_arc_connect_debug_core_read(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_DEBUG_READ_CORE, 0);
@@ -210,11 +210,11 @@ void z_arc_connect_gfrc_clear(void)
 }
 /* Read total 64 bits of global free running counter */
-u64_t z_arc_connect_gfrc_read(void)
+uint64_t z_arc_connect_gfrc_read(void)
 {
-	u32_t low;
-	u32_t high;
-	u32_t key;
+	uint32_t low;
+	uint32_t high;
+	uint32_t key;
 	/*
 	 * each core has its own arc connect interface, i.e.,
@@ -233,7 +233,7 @@ u64_t z_arc_connect_gfrc_read(void)
 	arch_irq_unlock(key);
-	return (((u64_t)high) << 32) | low;
+	return (((uint64_t)high) << 32) | low;
 }
 /* Enable global free running counter */
@@ -253,7 +253,7 @@ void z_arc_connect_gfrc_disable(void)
 }
 /* Disable global free running counter */
-void z_arc_connect_gfrc_core_set(u32_t core_mask)
+void z_arc_connect_gfrc_core_set(uint32_t core_mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_GFRC_SET_CORE,
@@ -262,9 +262,9 @@ void z_arc_connect_gfrc_core_set(u32_t core_mask)
 }
 /* Set the relevant cores to halt global free running counter */
-u32_t z_arc_connect_gfrc_halt_read(void)
+uint32_t z_arc_connect_gfrc_halt_read(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_HALT, 0);
@@ -275,9 +275,9 @@ u32_t z_arc_connect_gfrc_halt_read(void)
 }
 /* Read the internal CORE register */
-u32_t z_arc_connect_gfrc_core_read(void)
+uint32_t z_arc_connect_gfrc_core_read(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_GFRC_READ_CORE, 0);
@@ -304,9 +304,9 @@ void z_arc_connect_idu_disable(void)
 }
 /* Read enable status of interrupt distribute unit */
-u32_t z_arc_connect_idu_read_enable(void)
+uint32_t z_arc_connect_idu_read_enable(void)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_ENABLE, 0);
@@ -320,8 +320,8 @@ u32_t z_arc_connect_idu_read_enable(void)
  * Set the triggering mode and distribution mode for the specified common
  * interrupt
  */
-void z_arc_connect_idu_set_mode(u32_t irq_num,
-	u16_t trigger_mode, u16_t distri_mode)
+void z_arc_connect_idu_set_mode(uint32_t irq_num,
+	uint16_t trigger_mode, uint16_t distri_mode)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_MODE,
@@ -330,9 +330,9 @@ void z_arc_connect_idu_set_mode(u32_t irq_num,
 }
 /* Read the internal MODE register of the specified common interrupt */
-u32_t z_arc_connect_idu_read_mode(u32_t irq_num)
+uint32_t z_arc_connect_idu_read_mode(uint32_t irq_num)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_MODE, irq_num);
@@ -346,7 +346,7 @@ u32_t z_arc_connect_idu_read_mode(u32_t irq_num)
  * Set the target cores to receive the specified common interrupt
  * when it is triggered
  */
-void z_arc_connect_idu_set_dest(u32_t irq_num, u32_t core_mask)
+void z_arc_connect_idu_set_dest(uint32_t irq_num, uint32_t core_mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_DEST,
@@ -355,9 +355,9 @@ void z_arc_connect_idu_set_dest(u32_t irq_num, u32_t core_mask)
 }
 /* Read the internal DEST register of the specified common interrupt */
-u32_t z_arc_connect_idu_read_dest(u32_t irq_num)
+uint32_t z_arc_connect_idu_read_dest(uint32_t irq_num)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_DEST, irq_num);
@@ -368,7 +368,7 @@ u32_t z_arc_connect_idu_read_dest(u32_t irq_num)
 }
 /* Assert the specified common interrupt */
-void z_arc_connect_idu_gen_cirq(u32_t irq_num)
+void z_arc_connect_idu_gen_cirq(uint32_t irq_num)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_GEN_CIRQ, irq_num);
@@ -376,7 +376,7 @@ void z_arc_connect_idu_gen_cirq(u32_t irq_num)
 }
 /* Acknowledge the specified common interrupt */
-void z_arc_connect_idu_ack_cirq(u32_t irq_num)
+void z_arc_connect_idu_ack_cirq(uint32_t irq_num)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_ACK_CIRQ, irq_num);
@@ -384,9 +384,9 @@ void z_arc_connect_idu_ack_cirq(u32_t irq_num)
 }
 /* Read the internal STATUS register of the specified common interrupt */
-u32_t z_arc_connect_idu_check_status(u32_t irq_num)
+uint32_t z_arc_connect_idu_check_status(uint32_t irq_num)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_STATUS, irq_num);
@@ -397,9 +397,9 @@ u32_t z_arc_connect_idu_check_status(u32_t irq_num)
 }
 /* Read the internal SOURCE register of the specified common interrupt */
-u32_t z_arc_connect_idu_check_source(u32_t irq_num)
+uint32_t z_arc_connect_idu_check_source(uint32_t irq_num)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_SOURCE, irq_num);
@@ -410,7 +410,7 @@ u32_t z_arc_connect_idu_check_source(u32_t irq_num)
 }
 /* Mask or unmask the specified common interrupt */
-void z_arc_connect_idu_set_mask(u32_t irq_num, u32_t mask)
+void z_arc_connect_idu_set_mask(uint32_t irq_num, uint32_t mask)
 {
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd_data(ARC_CONNECT_CMD_IDU_SET_MASK,
@@ -419,9 +419,9 @@ void z_arc_connect_idu_set_mask(u32_t irq_num, u32_t mask)
 }
 /* Read the internal MASK register of the specified common interrupt */
-u32_t z_arc_connect_idu_read_mask(u32_t irq_num)
+uint32_t z_arc_connect_idu_read_mask(uint32_t irq_num)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_READ_MASK, irq_num);
@@ -435,9 +435,9 @@ u32_t z_arc_connect_idu_read_mask(u32_t irq_num)
  * Check if it is the first-acknowledging core to the common interrupt
  * if IDU is programmed in the first-acknowledged mode
  */
-u32_t z_arc_connect_idu_check_first(u32_t irq_num)
+uint32_t z_arc_connect_idu_check_first(uint32_t irq_num)
 {
-	u32_t ret = 0;
+	uint32_t ret = 0;
 	LOCKED(&arc_connect_spinlock) {
 		z_arc_connect_cmd(ARC_CONNECT_CMD_IDU_CHECK_FIRST, irq_num);

View file

@@ -35,7 +35,7 @@ volatile struct {
  * master core that it's waken
  *
  */
-volatile u32_t arc_cpu_wake_flag;
+volatile uint32_t arc_cpu_wake_flag;
 volatile char *arc_cpu_sp;
 /*
@@ -98,7 +98,7 @@ static void sched_ipi_handler(void *unused)
 /* arch implementation of sched_ipi */
 void arch_sched_ipi(void)
 {
-	u32_t i;
+	uint32_t i;
 	/* broadcast sched_ipi request to other cores
 	 * if the target is current core, hardware will ignore it

View file

@@ -55,7 +55,7 @@ static bool dcache_available(void)
 	return (val == 0) ? false : true;
 }
-static void dcache_dc_ctrl(u32_t dcache_en_mask)
+static void dcache_dc_ctrl(uint32_t dcache_en_mask)
 {
 	if (dcache_available()) {
 		z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
@@ -84,9 +84,9 @@ static void dcache_enable(void)
  *
  * @return N/A
  */
-static void dcache_flush_mlines(u32_t start_addr, u32_t size)
+static void dcache_flush_mlines(uint32_t start_addr, uint32_t size)
 {
-	u32_t end_addr;
+	uint32_t end_addr;
 	unsigned int key;
 	if (!dcache_available() || (size == 0U)) {
@@ -94,7 +94,7 @@ static void dcache_flush_mlines(u32_t start_addr, u32_t size)
 	}
 	end_addr = start_addr + size - 1;
-	start_addr &= (u32_t)(~(DCACHE_LINE_SIZE - 1));
+	start_addr &= (uint32_t)(~(DCACHE_LINE_SIZE - 1));
 	key = arch_irq_lock(); /* --enter critical section-- */
@@ -137,7 +137,7 @@ static void dcache_flush_mlines(u32_t start_addr, u32_t size)
 void sys_cache_flush(vaddr_t start_addr, size_t size)
 {
-	dcache_flush_mlines((u32_t)start_addr, (u32_t)size);
+	dcache_flush_mlines((uint32_t)start_addr, (uint32_t)size);
 }
@@ -145,7 +145,7 @@ void sys_cache_flush(vaddr_t start_addr, size_t size)
 size_t sys_cache_line_size;
 static void init_dcache_line_size(void)
 {
-	u32_t val;
+	uint32_t val;
 	val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
 	__ASSERT((val&0xff) != 0U, "d-cache is not present");

View file

@@ -71,7 +71,7 @@ static const struct z_exc_handle exceptions[] = {
  * @return The lowest allowed stack frame pointer, if error is a
  * thread stack corruption, otherwise return 0.
  */
-static u32_t z_check_thread_stack_fail(const u32_t fault_addr, u32_t sp)
+static uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
 {
 	const struct k_thread *thread = _current;
@@ -89,8 +89,8 @@ static u32_t z_check_thread_stack_fail(const u32_t fault_addr, u32_t sp)
 #else
 		sp = z_arc_v2_aux_reg_read(_ARC_V2_USER_SP);
 #endif
-		if (sp <= (u32_t)thread->stack_obj) {
-			return (u32_t)thread->stack_obj;
+		if (sp <= (uint32_t)thread->stack_obj) {
+			return (uint32_t)thread->stack_obj;
 		}
 	} else {
 		/* User thread in privilege mode */
@@ -103,10 +103,10 @@ static u32_t z_check_thread_stack_fail(const u32_t fault_addr, u32_t sp)
 		}
 	} else {
 		/* Supervisor thread */
-		if (IS_MPU_GUARD_VIOLATION((u32_t)thread->stack_obj,
+		if (IS_MPU_GUARD_VIOLATION((uint32_t)thread->stack_obj,
 			fault_addr, sp)) {
 			/* Supervisor thread stack corruption */
-			return (u32_t)thread->stack_obj + STACK_GUARD_SIZE;
+			return (uint32_t)thread->stack_obj + STACK_GUARD_SIZE;
 		}
 	}
 #else /* CONFIG_USERSPACE */
@@ -129,7 +129,7 @@ static u32_t z_check_thread_stack_fail(const u32_t fault_addr, u32_t sp)
  * These codes and parameters do not have associated names in
  * the technical manual, just switch on the values in Table 6-5
  */
-static const char *get_protv_access_err(u32_t parameter)
+static const char *get_protv_access_err(uint32_t parameter)
 {
 	switch (parameter) {
 	case 0x1:
@@ -151,7 +151,7 @@ static const char *get_protv_access_err(u32_t parameter)
 	}
 }
-static void dump_protv_exception(u32_t cause, u32_t parameter)
+static void dump_protv_exception(uint32_t cause, uint32_t parameter)
 {
 	switch (cause) {
 	case 0x0:
@@ -185,7 +185,7 @@ static void dump_protv_exception(u32_t cause, u32_t parameter)
 	}
 }
-static void dump_machine_check_exception(u32_t cause, u32_t parameter)
+static void dump_machine_check_exception(uint32_t cause, uint32_t parameter)
 {
 	switch (cause) {
 	case 0x0:
@@ -233,7 +233,7 @@ static void dump_machine_check_exception(u32_t cause, u32_t parameter)
 	}
 }
-static void dump_privilege_exception(u32_t cause, u32_t parameter)
+static void dump_privilege_exception(uint32_t cause, uint32_t parameter)
 {
 	switch (cause) {
 	case 0x0:
@@ -289,7 +289,7 @@ static void dump_privilege_exception(u32_t cause, u32_t parameter)
 	}
 }
-static void dump_exception_info(u32_t vector, u32_t cause, u32_t parameter)
+static void dump_exception_info(uint32_t vector, uint32_t cause, uint32_t parameter)
 {
 	if (vector >= 0x10 && vector <= 0xFF) {
 		LOG_ERR("interrupt %u", vector);
@@ -363,19 +363,19 @@ static void dump_exception_info(u32_t vector, u32_t cause, u32_t parameter)
  * invokes the user provided routine k_sys_fatal_error_handler() which is
  * responsible for implementing the error handling policy.
  */
-void _Fault(z_arch_esf_t *esf, u32_t old_sp)
+void _Fault(z_arch_esf_t *esf, uint32_t old_sp)
 {
-	u32_t vector, cause, parameter;
-	u32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
-	u32_t ecr = z_arc_v2_aux_reg_read(_ARC_V2_ECR);
+	uint32_t vector, cause, parameter;
+	uint32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
+	uint32_t ecr = z_arc_v2_aux_reg_read(_ARC_V2_ECR);
 #ifdef CONFIG_USERSPACE
 	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
-		u32_t start = (u32_t)exceptions[i].start;
-		u32_t end = (u32_t)exceptions[i].end;
+		uint32_t start = (uint32_t)exceptions[i].start;
+		uint32_t end = (uint32_t)exceptions[i].end;
 		if (esf->pc >= start && esf->pc < end) {
-			esf->pc = (u32_t)(exceptions[i].fixup);
+			esf->pc = (uint32_t)(exceptions[i].fixup);
 			return;
 		}
 	}

View file

@@ -137,7 +137,7 @@ int arch_irq_is_enabled(unsigned int irq)
  * @return N/A
  */
-void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
 {
 	ARG_UNUSED(flags);
@@ -174,7 +174,7 @@ void z_irq_spurious(void *unused)
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			     void (*routine)(void *parameter), void *parameter,
-			     u32_t flags)
+			     uint32_t flags)
 {
 	z_isr_install(irq, routine, parameter);
 	z_irq_priority_set(irq, priority, flags);

View file

@@ -36,7 +36,7 @@ int arch_mem_domain_max_partitions_get(void)
  * Reset MPU region for a single memory partition
  */
 void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
-				      u32_t partition_id)
+				      uint32_t partition_id)
 {
 	if (_current->mem_domain_info.mem_domain != domain) {
 		return;
@@ -76,7 +76,7 @@ void arch_mem_domain_destroy(struct k_mem_domain *domain)
 }
 void arch_mem_domain_partition_add(struct k_mem_domain *domain,
-				   u32_t partition_id)
+				   uint32_t partition_id)
 {
 	/* No-op on this architecture */
 }

View file

@@ -21,20 +21,20 @@ LOG_MODULE_REGISTER(mpu);
  * @brief Get the number of supported MPU regions
  *
  */
-static inline u8_t get_num_regions(void)
+static inline uint8_t get_num_regions(void)
 {
-	u32_t num = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
+	uint32_t num = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
 	num = (num & 0xFF00U) >> 8U;
-	return (u8_t)num;
+	return (uint8_t)num;
 }
 /**
  * This internal function is utilized by the MPU driver to parse the intent
  * type (i.e. THREAD_STACK_REGION) and return the correct parameter set.
  */
-static inline u32_t get_region_attr_by_type(u32_t type)
+static inline uint32_t get_region_attr_by_type(uint32_t type)
 {
 	switch (type) {
 	case THREAD_STACK_USER_REGION:

View file

@@ -26,10 +26,10 @@
 /**
  * This internal function initializes a MPU region
  */
-static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
-				u32_t region_attr)
+static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
+				uint32_t region_attr)
 {
-	u8_t bits = find_msb_set(size) - 1;
+	uint8_t bits = find_msb_set(size) - 1;
 	index = index * 2U;
@@ -57,7 +57,7 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
  * This internal function is utilized by the MPU driver to parse the intent
  * type (i.e. THREAD_STACK_REGION) and return the correct region index.
  */
-static inline int get_region_index_by_type(u32_t type)
+static inline int get_region_index_by_type(uint32_t type)
 {
 	/*
 	 * The new MPU regions are allocated per type after the statically
@@ -90,7 +90,7 @@ static inline int get_region_index_by_type(u32_t type)
 /**
  * This internal function checks if region is enabled or not
  */
-static inline bool _is_enabled_region(u32_t r_index)
+static inline bool _is_enabled_region(uint32_t r_index)
 {
 	return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + r_index * 2U)
 		& AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
@@ -99,11 +99,11 @@ static inline bool _is_enabled_region(u32_t r_index)
 /**
  * This internal function check if the given buffer in in the region
  */
-static inline bool _is_in_region(u32_t r_index, u32_t start, u32_t size)
+static inline bool _is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
 {
-	u32_t r_addr_start;
-	u32_t r_addr_end;
-	u32_t r_size_lshift;
+	uint32_t r_addr_start;
+	uint32_t r_addr_end;
+	uint32_t r_size_lshift;
 	r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + r_index * 2U)
 		& (~AUX_MPU_RDB_VALID_MASK);
@@ -122,9 +122,9 @@ static inline bool _is_in_region(u32_t r_index, u32_t start, u32_t size)
 /**
  * This internal function check if the region is user accessible or not
  */
-static inline bool _is_user_accessible_region(u32_t r_index, int write)
+static inline bool _is_user_accessible_region(uint32_t r_index, int write)
 {
-	u32_t r_ap;
+	uint32_t r_ap;
 	r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + r_index * 2U);
@@ -146,10 +146,10 @@ static inline bool _is_user_accessible_region(u32_t r_index, int write)
  * @param base base address in RAM
  * @param size size of the region
  */
-static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
+static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
 {
-	s32_t region_index = get_region_index_by_type(type);
-	u32_t region_attr = get_region_attr_by_type(type);
+	int32_t region_index = get_region_index_by_type(type);
+	uint32_t region_attr = get_region_attr_by_type(type);
 	LOG_DBG("Region info: 0x%x 0x%x", base, size);
@@ -200,7 +200,7 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 	if (thread->base.user_options & K_USER) {
 		LOG_DBG("configure user thread %p's stack", thread);
 		if (_mpu_configure(THREAD_STACK_USER_REGION,
-		    (u32_t)thread->stack_obj, thread->stack_info.size) < 0) {
+		    (uint32_t)thread->stack_obj, thread->stack_info.size) < 0) {
 			LOG_ERR("user thread %p's stack failed", thread);
 			return;
 		}
@@ -217,9 +217,9 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
  *
  * @param region_attr region attribute of default region
  */
-void arc_core_mpu_default(u32_t region_attr)
+void arc_core_mpu_default(uint32_t region_attr)
 {
-	u32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
+	uint32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
 		(~AUX_MPU_RDP_ATTR_MASK);
 	region_attr &= AUX_MPU_RDP_ATTR_MASK;
@@ -234,8 +234,8 @@ void arc_core_mpu_default(u32_t region_attr)
  * @param base base address
  * @param region_attr region attribute
 */
-int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
-			u32_t region_attr)
+int arc_core_mpu_region(uint32_t index, uint32_t base, uint32_t size,
+			uint32_t region_attr)
 {
 	if (index >= get_num_regions()) {
 		return -EINVAL;
@@ -259,7 +259,7 @@ void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
 {
 	int region_index =
 		get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
-	u32_t num_partitions;
+	uint32_t num_partitions;
 	struct k_mem_partition *pparts;
 	struct k_mem_domain *mem_domain = NULL;
@@ -316,7 +316,7 @@ void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
  * @param partition_id memory partition id
 */
 void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
-				       u32_t part_id)
+				       uint32_t part_id)
 {
 	ARG_UNUSED(domain);
@@ -351,7 +351,7 @@ int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
 	 */
 	for (r_index = 0; r_index < get_num_regions(); r_index++) {
 		if (!_is_enabled_region(r_index) ||
-		    !_is_in_region(r_index, (u32_t)addr, size)) {
+		    !_is_in_region(r_index, (uint32_t)addr, size)) {
 			continue;
 		}
@@ -377,8 +377,8 @@ static int arc_mpu_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
-	u32_t num_regions;
-	u32_t i;
+	uint32_t num_regions;
+	uint32_t i;
 	num_regions = get_num_regions();

View file

@@ -62,14 +62,14 @@
  * memory areas where dynamic MPU programming is allowed.
  */
 struct dynamic_region_info {
-	u8_t index;
-	u32_t base;
-	u32_t size;
-	u32_t attr;
+	uint8_t index;
+	uint32_t base;
+	uint32_t size;
+	uint32_t attr;
 };
-static u8_t dynamic_regions_num;
-static u8_t dynamic_region_index;
+static uint8_t dynamic_regions_num;
+static uint8_t dynamic_region_index;
 /**
  * Global array, holding the MPU region index of
@@ -79,41 +79,41 @@ static u8_t dynamic_region_index;
 static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
 #endif /* CONFIG_MPU_GAP_FILLING */
-static u8_t static_regions_num;
+static uint8_t static_regions_num;
 #ifdef CONFIG_ARC_NORMAL_FIRMWARE
 /* \todo through secure service to access mpu */
-static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
-				u32_t region_attr)
+static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
+				uint32_t region_attr)
 {
 }
-static inline void _region_set_attr(u32_t index, u32_t attr)
+static inline void _region_set_attr(uint32_t index, uint32_t attr)
 {
 }
-static inline u32_t _region_get_attr(u32_t index)
+static inline uint32_t _region_get_attr(uint32_t index)
 {
 	return 0;
 }
-static inline u32_t _region_get_start(u32_t index)
+static inline uint32_t _region_get_start(uint32_t index)
 {
 	return 0;
 }
-static inline void _region_set_start(u32_t index, u32_t start)
+static inline void _region_set_start(uint32_t index, uint32_t start)
 {
 }
-static inline u32_t _region_get_end(u32_t index)
+static inline uint32_t _region_get_end(uint32_t index)
 {
 	return 0;
 }
-static inline void _region_set_end(u32_t index, u32_t end)
+static inline void _region_set_end(uint32_t index, uint32_t end)
 {
 }
@@ -121,7 +121,7 @@ static inline void _region_set_end(u32_t index, u32_t end)
  * This internal function probes the given addr's MPU index.if not
  * in MPU, returns error
  */
-static inline int _mpu_probe(u32_t addr)
+static inline int _mpu_probe(uint32_t addr)
 {
 	return -EINVAL;
 }
@@ -129,7 +129,7 @@ static inline int _mpu_probe(u32_t addr)
 /**
  * This internal function checks if MPU region is enabled or not
 */
-static inline bool _is_enabled_region(u32_t r_index)
+static inline bool _is_enabled_region(uint32_t r_index)
 {
 	return false;
 }
@@ -137,14 +137,14 @@ static inline bool _is_enabled_region(u32_t r_index)
 /**
  * This internal function check if the region is user accessible or not
 */
-static inline bool _is_user_accessible_region(u32_t r_index, int write)
+static inline bool _is_user_accessible_region(uint32_t r_index, int write)
 {
 	return false;
 }
 #else /* CONFIG_ARC_NORMAL_FIRMWARE */
 /* the following functions are prepared for SECURE_FRIMWARE */
-static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
-				u32_t region_attr)
+static inline void _region_init(uint32_t index, uint32_t region_addr, uint32_t size,
+				uint32_t region_attr)
 {
 	if (size < (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS)) {
 		size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
@@ -162,34 +162,34 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
 }
-static inline void _region_set_attr(u32_t index, u32_t attr)
+static inline void _region_set_attr(uint32_t index, uint32_t attr)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, attr |
 		AUX_MPU_RPER_VALID_MASK);
 }
-static inline u32_t _region_get_attr(u32_t index)
+static inline uint32_t _region_get_attr(uint32_t index)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
 	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
 }
-static inline u32_t _region_get_start(u32_t index)
+static inline uint32_t _region_get_start(uint32_t index)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
 	return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RSTART);
 }
-static inline void _region_set_start(u32_t index, u32_t start)
+static inline void _region_set_start(uint32_t index, uint32_t start)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, start);
 }
-static inline u32_t _region_get_end(u32_t index)
+static inline uint32_t _region_get_end(uint32_t index)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
@@ -197,7 +197,7 @@ static inline u32_t _region_get_end(u32_t index)
 		(1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
 }
-static inline void _region_set_end(u32_t index, u32_t end)
+static inline void _region_set_end(uint32_t index, uint32_t end)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND, end -
@@ -208,9 +208,9 @@ static inline void _region_set_end(u32_t index, u32_t end)
  * This internal function probes the given addr's MPU index.if not
  * in MPU, returns error
 */
-static inline int _mpu_probe(u32_t addr)
+static inline int _mpu_probe(uint32_t addr)
 {
-	u32_t val;
+	uint32_t val;
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
 	val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
@@ -226,7 +226,7 @@ static inline int _mpu_probe(u32_t addr)
 /**
  * This internal function checks if MPU region is enabled or not
 */
-static inline bool _is_enabled_region(u32_t r_index)
+static inline bool _is_enabled_region(uint32_t r_index)
 {
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
 	return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
@@ -236,9 +236,9 @@ static inline bool _is_enabled_region(u32_t r_index)
 /**
  * This internal function check if the region is user accessible or not
 */
-static inline bool _is_user_accessible_region(u32_t r_index, int write)
+static inline bool _is_user_accessible_region(uint32_t r_index, int write)
 {
-	u32_t r_ap;
+	uint32_t r_ap;
 	z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
 	r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
@@ -259,7 +259,7 @@ static inline bool _is_user_accessible_region(u32_t r_index, int write)
 * This internal function checks the area given by (start, size)
 * and returns the index if the area match one MPU entry
 */
-static inline int _get_region_index(u32_t start, u32_t size)
+static inline int _get_region_index(uint32_t start, uint32_t size)
 {
 	int index = _mpu_probe(start);
@@ -295,8 +295,8 @@ static inline int _dynamic_region_allocate_index(void)
 * @param attr region attribute
 * @return <0 failure, >0 allocated dynamic region index
 */
-static int _dynamic_region_allocate_and_init(u32_t base, u32_t size,
-	u32_t attr)
+static int _dynamic_region_allocate_and_init(uint32_t base, uint32_t size,
+	uint32_t attr)
 {
 	int u_region_index = _get_region_index(base, size);
 	int region_index;
@@ -321,10 +321,10 @@ static int _dynamic_region_allocate_and_init(u32_t base, u32_t size,
 	 * region, possibly splitting the underlying region into two.
 	 */
-	u32_t u_region_start = _region_get_start(u_region_index);
-	u32_t u_region_end = _region_get_end(u_region_index);
-	u32_t u_region_attr = _region_get_attr(u_region_index);
-	u32_t end = base + size;
+	uint32_t u_region_start = _region_get_start(u_region_index);
+	uint32_t u_region_end = _region_get_end(u_region_index);
+	uint32_t u_region_attr = _region_get_attr(u_region_index);
+	uint32_t end = base + size;
 	if ((base == u_region_start) && (end == u_region_end)) {
@@ -397,8 +397,8 @@ static int _dynamic_region_allocate_and_init(u32_t base, u32_t size,
 */
 static void _mpu_reset_dynamic_regions(void)
 {
-	u32_t i;
-	u32_t num_regions = get_num_regions();
+	uint32_t i;
+	uint32_t num_regions = get_num_regions();
 	for (i = static_regions_num; i < num_regions; i++) {
 		_region_init(i, 0, 0, 0);
@@ -423,9 +423,9 @@ static void _mpu_reset_dynamic_regions(void)
 * @param base base address in RAM
 * @param size size of the region
 */
-static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
+static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
 {
-	u32_t region_attr = get_region_attr_by_type(type);
+	uint32_t region_attr = get_region_attr_by_type(type);
 	return _dynamic_region_allocate_and_init(base, size, region_attr);
 }
@@ -434,7 +434,7 @@ static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
 * This internal function is utilized by the MPU driver to parse the intent
 * type (i.e. THREAD_STACK_REGION) and return the correct region index.
 */
-static inline int get_region_index_by_type(u32_t type)
+static inline int get_region_index_by_type(uint32_t type)
 {
 	/*
 	 * The new MPU regions are allocated per type after the statically
@@ -476,10 +476,10 @@ static inline int get_region_index_by_type(u32_t type)
 * @param base base address in RAM
 * @param size size of the region
 */
-static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
+static inline int _mpu_configure(uint8_t type, uint32_t base, uint32_t size)
 {
 	int region_index = get_region_index_by_type(type);
-	u32_t region_attr = get_region_attr_by_type(type);
+	uint32_t region_attr = get_region_attr_by_type(type);
 	LOG_DBG("Region info: 0x%x 0x%x", base, size);
@@ -588,14 +588,14 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 	if (thread->base.user_options & K_USER) {
 		LOG_DBG("configure user thread %p's stack", thread);
 		if (_mpu_configure(THREAD_STACK_USER_REGION,
-		    (u32_t)thread->stack_obj, thread->stack_info.size) < 0) {
+		    (uint32_t)thread->stack_obj, thread->stack_info.size) < 0) {
 			LOG_ERR("thread %p's stack failed", thread);
 			return;
 		}
 	}
 #if defined(CONFIG_MPU_GAP_FILLING)
-	u32_t num_partitions;
+	uint32_t num_partitions;
 	struct k_mem_partition *pparts;
 	struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
@@ -610,7 +610,7 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 		pparts = NULL;
 	}
-	for (u32_t i = 0; i < num_partitions; i++) {
+	for (uint32_t i = 0; i < num_partitions; i++) {
 		if (pparts->size) {
 			if (_dynamic_region_allocate_and_init(pparts->start,
 				pparts->size, pparts->attr) < 0) {
@@ -633,7 +633,7 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 *
 * @param region_attr region attribute of default region
 */
-void arc_core_mpu_default(u32_t region_attr)
+void arc_core_mpu_default(uint32_t region_attr)
 {
 #ifdef CONFIG_ARC_NORMAL_FIRMWARE
 	/* \todo through secure service to access mpu */
@@ -650,8 +650,8 @@ void arc_core_mpu_default(u32_t region_attr)
 * @param size region size
 * @param region_attr region attribute
 */
-int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
-			u32_t region_attr)
+int arc_core_mpu_region(uint32_t index, uint32_t base, uint32_t size,
+			uint32_t region_attr)
 {
 	if (index >= get_num_regions()) {
 		return -EINVAL;
@@ -678,9 +678,9 @@ void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
 #else
 void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
 {
-	u32_t region_index;
-	u32_t num_partitions;
-	u32_t num_regions;
+	uint32_t region_index;
+	uint32_t num_partitions;
+	uint32_t num_regions;
 	struct k_mem_partition *pparts;
 	struct k_mem_domain *mem_domain = NULL;
@@ -728,7 +728,7 @@ void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
 */
 void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
 {
-	u32_t num_partitions;
+	uint32_t num_partitions;
 	struct k_mem_partition *pparts;
 	int index;
@@ -742,7 +742,7 @@ void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
 		pparts = NULL;
 	}
-	for (u32_t i = 0; i < num_partitions; i++) {
+	for (uint32_t i = 0; i < num_partitions; i++) {
 		if (pparts->size) {
 			index = _get_region_index(pparts->start,
 				pparts->size);
@@ -765,7 +765,7 @@ void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
 * @param partition_id memory partition id
 */
 void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
-				       u32_t partition_id)
+				       uint32_t partition_id)
 {
 	struct k_mem_partition *partition = &domain->partitions[partition_id];
@@ -811,9 +811,9 @@ int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
 	 * we can stop the iteration immediately once we find the
 	 * matched region that grants permission or denies access.
 	 */
-	r_index = _mpu_probe((u32_t)addr);
+	r_index = _mpu_probe((uint32_t)addr);
 	/* match and the area is in one region */
-	if (r_index >= 0 && r_index == _mpu_probe((u32_t)addr + (size - 1))) {
+	if (r_index >= 0 && r_index == _mpu_probe((uint32_t)addr + (size - 1))) {
 		if (_is_user_accessible_region(r_index, write)) {
 			r_index = 0;
 		} else {
@@ -839,8 +839,8 @@ int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
 static int arc_mpu_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
-	u32_t num_regions;
-	u32_t i;
+	uint32_t num_regions;
+	uint32_t i;
 	num_regions = get_num_regions();

View file

@@ -19,9 +19,9 @@ static void _default_sjli_entry(void);
 * \todo: how to let user to install customized sjli entry easily, e.g.
 * through macros or with the help of compiler?
 */
-const static u32_t _sjli_vector_table[CONFIG_SJLI_TABLE_SIZE] = {
-	[0] = (u32_t)_arc_do_secure_call,
-	[1 ... (CONFIG_SJLI_TABLE_SIZE - 1)] = (u32_t)_default_sjli_entry,
+const static uint32_t _sjli_vector_table[CONFIG_SJLI_TABLE_SIZE] = {
+	[0] = (uint32_t)_arc_do_secure_call,
+	[1 ... (CONFIG_SJLI_TABLE_SIZE - 1)] = (uint32_t)_default_sjli_entry,
 };
 /*

View file

@@ -22,7 +22,7 @@
 * an secure service to access secure aux regs. Check should be done
 * to decide whether the access is valid.
 */
-static s32_t arc_s_aux_read(u32_t aux_reg)
+static int32_t arc_s_aux_read(uint32_t aux_reg)
 {
 	return -1;
 }
@@ -37,7 +37,7 @@ static s32_t arc_s_aux_read(u32_t aux_reg)
 * an secure service to access secure aux regs. Check should be done
 * to decide whether the access is valid.
 */
-static s32_t arc_s_aux_write(u32_t aux_reg, u32_t val)
+static int32_t arc_s_aux_write(uint32_t aux_reg, uint32_t val)
 {
 	if (aux_reg == _ARC_V2_AUX_IRQ_ACT) {
 		/* 0 -> CONFIG_NUM_IRQ_PRIO_LEVELS allocated to secure world
@@ -64,7 +64,7 @@ static s32_t arc_s_aux_write(u32_t aux_reg, u32_t val)
 * apply one. Necessary check should be done to decide whether the apply is
 * valid
 */
-static s32_t arc_s_irq_alloc(u32_t intno)
+static int32_t arc_s_irq_alloc(uint32_t intno)
 {
 	z_arc_v2_irq_uinit_secure_set(intno, 0);
 	return 0;

View file

@@ -22,15 +22,15 @@
 /* initial stack frame */
 struct init_stack_frame {
-	u32_t pc;
+	uint32_t pc;
 #ifdef CONFIG_ARC_HAS_SECURE
-	u32_t sec_stat;
+	uint32_t sec_stat;
 #endif
-	u32_t status32;
-	u32_t r3;
-	u32_t r2;
-	u32_t r1;
-	u32_t r0;
+	uint32_t status32;
+	uint32_t r3;
+	uint32_t r2;
+	uint32_t r1;
+	uint32_t r0;
 };
 /*
@@ -86,10 +86,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	if (options & K_USER) {
 #ifdef CONFIG_GEN_PRIV_STACKS
 		thread->arch.priv_stack_start =
-			(u32_t)z_priv_stack_find(thread->stack_obj);
+			(uint32_t)z_priv_stack_find(thread->stack_obj);
 #else
 		thread->arch.priv_stack_start =
-			(u32_t)(stackEnd + STACK_GUARD_SIZE);
+			(uint32_t)(stackEnd + STACK_GUARD_SIZE);
 #endif
 		priv_stack_end = (char *)Z_STACK_PTR_ALIGN(
@@ -98,8 +98,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		/* reserve 4 bytes for the start of user sp */
 		priv_stack_end -= 4;
-		(*(u32_t *)priv_stack_end) = Z_STACK_PTR_ALIGN(
-			(u32_t)stackEnd - offset);
+		(*(uint32_t *)priv_stack_end) = Z_STACK_PTR_ALIGN(
+			(uint32_t)stackEnd - offset);
 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
 		/* reserve stack space for the userspace local data struct */
@@ -108,8 +108,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 			Z_STACK_PTR_ALIGN(stackEnd -
 			sizeof(*thread->userspace_local_data) - offset);
 		/* update the start of user sp */
-		(*(u32_t *)priv_stack_end) =
-			(u32_t) thread->userspace_local_data;
+		(*(uint32_t *)priv_stack_end) =
+			(uint32_t) thread->userspace_local_data;
 #endif
 	} else {
@@ -139,9 +139,9 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	/* fill init context */
 	pInitCtx->status32 = 0U;
 	if (options & K_USER) {
-		pInitCtx->pc = ((u32_t)z_user_thread_entry_wrapper);
+		pInitCtx->pc = ((uint32_t)z_user_thread_entry_wrapper);
 	} else {
-		pInitCtx->pc = ((u32_t)z_thread_entry_wrapper);
+		pInitCtx->pc = ((uint32_t)z_thread_entry_wrapper);
 	}
 	/*
@@ -165,17 +165,17 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		sizeof(struct init_stack_frame));
 	pInitCtx->status32 = 0U;
-	pInitCtx->pc = ((u32_t)z_thread_entry_wrapper);
+	pInitCtx->pc = ((uint32_t)z_thread_entry_wrapper);
 #endif
 #ifdef CONFIG_ARC_SECURE_FIRMWARE
 	pInitCtx->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
 #endif
-	pInitCtx->r0 = (u32_t)pEntry;
-	pInitCtx->r1 = (u32_t)parameter1;
-	pInitCtx->r2 = (u32_t)parameter2;
-	pInitCtx->r3 = (u32_t)parameter3;
+	pInitCtx->r0 = (uint32_t)pEntry;
+	pInitCtx->r1 = (uint32_t)parameter1;
+	pInitCtx->r2 = (uint32_t)parameter2;
+	pInitCtx->r3 = (uint32_t)parameter3;
 	/* stack check configuration */
 #ifdef CONFIG_ARC_STACK_CHECKING
@@ -186,21 +186,21 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #endif
 #ifdef CONFIG_USERSPACE
 	if (options & K_USER) {
-		thread->arch.u_stack_top = (u32_t)pStackMem;
-		thread->arch.u_stack_base = (u32_t)stackEnd;
+		thread->arch.u_stack_top = (uint32_t)pStackMem;
+		thread->arch.u_stack_base = (uint32_t)stackEnd;
 		thread->arch.k_stack_top =
-			(u32_t)(thread->arch.priv_stack_start);
-		thread->arch.k_stack_base = (u32_t)
+			(uint32_t)(thread->arch.priv_stack_start);
+		thread->arch.k_stack_base = (uint32_t)
 			(thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE);
 	} else {
-		thread->arch.k_stack_top = (u32_t)pStackMem;
-		thread->arch.k_stack_base = (u32_t)stackEnd;
+		thread->arch.k_stack_top = (uint32_t)pStackMem;
+		thread->arch.k_stack_base = (uint32_t)stackEnd;
 		thread->arch.u_stack_top = 0;
 		thread->arch.u_stack_base = 0;
 	}
 #else
-	thread->arch.k_stack_top = (u32_t) pStackMem;
-	thread->arch.k_stack_base = (u32_t) stackEnd;
+	thread->arch.k_stack_top = (uint32_t) pStackMem;
+	thread->arch.k_stack_base = (uint32_t) stackEnd;
 #endif
 #endif
@@ -211,7 +211,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread->switch_handle = thread;
 	thread->arch.relinquish_cause = _CAUSE_COOP;
 	thread->callee_saved.sp =
-		(u32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
+		(uint32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
 	/* initial values in all other regs/k_thread entries are irrelevant */
 }
@@ -230,13 +230,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 {
-	_current->stack_info.start = (u32_t)_current->stack_obj;
+	_current->stack_info.start = (uint32_t)_current->stack_obj;
 #ifdef CONFIG_GEN_PRIV_STACKS
 	_current->arch.priv_stack_start =
-		(u32_t)z_priv_stack_find(_current->stack_obj);
+		(uint32_t)z_priv_stack_find(_current->stack_obj);
 #else
 	_current->arch.priv_stack_start =
-		(u32_t)(_current->stack_info.start +
+		(uint32_t)(_current->stack_info.start +
 		_current->stack_info.size + STACK_GUARD_SIZE);
 #endif
@@ -255,7 +255,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	configure_mpu_thread(_current);
 	z_arc_userspace_enter(user_entry, p1, p2, p3,
-			      (u32_t)_current->stack_obj,
+			      (uint32_t)_current->stack_obj,
 			      _current->stack_info.size, _current);
 	CODE_UNREACHABLE;
 }

@ -23,17 +23,17 @@
* *
* @return 64-bit time stamp value * @return 64-bit time stamp value
*/ */
u64_t z_tsc_read(void) uint64_t z_tsc_read(void)
{ {
unsigned int key; unsigned int key;
u64_t t; uint64_t t;
u32_t count; uint32_t count;
key = arch_irq_lock(); key = arch_irq_lock();
t = (u64_t)z_tick_get(); t = (uint64_t)z_tick_get();
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
arch_irq_unlock(key); arch_irq_unlock(key);
t *= k_ticks_to_cyc_floor64(1); t *= k_ticks_to_cyc_floor64(1);
t += (u64_t)count; t += (uint64_t)count;
return t; return t;
} }
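A minimal usage sketch (hypothetical caller, not part of this commit): because z_tsc_read() returns the tick count scaled to cycles plus the live TMR0 count, two reads can be subtracted directly to measure elapsed cycles. do_work() is a placeholder for the code under measurement.

uint64_t start = z_tsc_read();
do_work();                                      /* hypothetical workload */
uint64_t elapsed_cycles = z_tsc_read() - start; /* cycles spent in do_work() */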

@ -28,39 +28,39 @@
#include "vector_table.h" #include "vector_table.h"
struct vector_table { struct vector_table {
u32_t reset; uint32_t reset;
u32_t memory_error; uint32_t memory_error;
u32_t instruction_error; uint32_t instruction_error;
u32_t ev_machine_check; uint32_t ev_machine_check;
u32_t ev_tlb_miss_i; uint32_t ev_tlb_miss_i;
u32_t ev_tlb_miss_d; uint32_t ev_tlb_miss_d;
u32_t ev_prot_v; uint32_t ev_prot_v;
u32_t ev_privilege_v; uint32_t ev_privilege_v;
u32_t ev_swi; uint32_t ev_swi;
u32_t ev_trap; uint32_t ev_trap;
u32_t ev_extension; uint32_t ev_extension;
u32_t ev_div_zero; uint32_t ev_div_zero;
u32_t ev_dc_error; uint32_t ev_dc_error;
u32_t ev_maligned; uint32_t ev_maligned;
u32_t unused_1; uint32_t unused_1;
u32_t unused_2; uint32_t unused_2;
}; };
struct vector_table _VectorTable Z_GENERIC_SECTION(.exc_vector_table) = { struct vector_table _VectorTable Z_GENERIC_SECTION(.exc_vector_table) = {
(u32_t)__reset, (uint32_t)__reset,
(u32_t)__memory_error, (uint32_t)__memory_error,
(u32_t)__instruction_error, (uint32_t)__instruction_error,
(u32_t)__ev_machine_check, (uint32_t)__ev_machine_check,
(u32_t)__ev_tlb_miss_i, (uint32_t)__ev_tlb_miss_i,
(u32_t)__ev_tlb_miss_d, (uint32_t)__ev_tlb_miss_d,
(u32_t)__ev_prot_v, (uint32_t)__ev_prot_v,
(u32_t)__ev_privilege_v, (uint32_t)__ev_privilege_v,
(u32_t)__ev_swi, (uint32_t)__ev_swi,
(u32_t)__ev_trap, (uint32_t)__ev_trap,
(u32_t)__ev_extension, (uint32_t)__ev_extension,
(u32_t)__ev_div_zero, (uint32_t)__ev_div_zero,
(u32_t)__ev_dc_error, (uint32_t)__ev_dc_error,
(u32_t)__ev_maligned, (uint32_t)__ev_maligned,
0, 0,
0 0
}; };

@ -37,70 +37,70 @@ extern "C" {
#ifdef CONFIG_ARC_HAS_SECURE #ifdef CONFIG_ARC_HAS_SECURE
struct _irq_stack_frame { struct _irq_stack_frame {
u32_t lp_end; uint32_t lp_end;
u32_t lp_start; uint32_t lp_start;
u32_t lp_count; uint32_t lp_count;
#ifdef CONFIG_CODE_DENSITY #ifdef CONFIG_CODE_DENSITY
/* /*
* Currently unsupported. This is where those registers are * Currently unsupported. This is where those registers are
* automatically pushed on the stack by the CPU when taking a regular * automatically pushed on the stack by the CPU when taking a regular
* IRQ. * IRQ.
*/ */
u32_t ei_base; uint32_t ei_base;
u32_t ldi_base; uint32_t ldi_base;
u32_t jli_base; uint32_t jli_base;
#endif #endif
u32_t r0; uint32_t r0;
u32_t r1; uint32_t r1;
u32_t r2; uint32_t r2;
u32_t r3; uint32_t r3;
u32_t r4; uint32_t r4;
u32_t r5; uint32_t r5;
u32_t r6; uint32_t r6;
u32_t r7; uint32_t r7;
u32_t r8; uint32_t r8;
u32_t r9; uint32_t r9;
u32_t r10; uint32_t r10;
u32_t r11; uint32_t r11;
u32_t r12; uint32_t r12;
u32_t r13; uint32_t r13;
u32_t blink; uint32_t blink;
u32_t pc; uint32_t pc;
u32_t sec_stat; uint32_t sec_stat;
u32_t status32; uint32_t status32;
}; };
#else #else
struct _irq_stack_frame { struct _irq_stack_frame {
u32_t r0; uint32_t r0;
u32_t r1; uint32_t r1;
u32_t r2; uint32_t r2;
u32_t r3; uint32_t r3;
u32_t r4; uint32_t r4;
u32_t r5; uint32_t r5;
u32_t r6; uint32_t r6;
u32_t r7; uint32_t r7;
u32_t r8; uint32_t r8;
u32_t r9; uint32_t r9;
u32_t r10; uint32_t r10;
u32_t r11; uint32_t r11;
u32_t r12; uint32_t r12;
u32_t r13; uint32_t r13;
u32_t blink; uint32_t blink;
u32_t lp_end; uint32_t lp_end;
u32_t lp_start; uint32_t lp_start;
u32_t lp_count; uint32_t lp_count;
#ifdef CONFIG_CODE_DENSITY #ifdef CONFIG_CODE_DENSITY
/* /*
* Currently unsupported. This is where those registers are * Currently unsupported. This is where those registers are
* automatically pushed on the stack by the CPU when taking a regular * automatically pushed on the stack by the CPU when taking a regular
* IRQ. * IRQ.
*/ */
u32_t ei_base; uint32_t ei_base;
u32_t ldi_base; uint32_t ldi_base;
u32_t jli_base; uint32_t jli_base;
#endif #endif
u32_t pc; uint32_t pc;
u32_t status32; uint32_t status32;
}; };
#endif #endif
@ -110,47 +110,47 @@ typedef struct _irq_stack_frame _isf_t;
/* callee-saved registers pushed on the stack, not in k_thread */ /* callee-saved registers pushed on the stack, not in k_thread */
struct _callee_saved_stack { struct _callee_saved_stack {
u32_t r13; uint32_t r13;
u32_t r14; uint32_t r14;
u32_t r15; uint32_t r15;
u32_t r16; uint32_t r16;
u32_t r17; uint32_t r17;
u32_t r18; uint32_t r18;
u32_t r19; uint32_t r19;
u32_t r20; uint32_t r20;
u32_t r21; uint32_t r21;
u32_t r22; uint32_t r22;
u32_t r23; uint32_t r23;
u32_t r24; uint32_t r24;
u32_t r25; uint32_t r25;
u32_t r26; uint32_t r26;
u32_t fp; /* r27 */ uint32_t fp; /* r27 */
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE #ifdef CONFIG_ARC_HAS_SECURE
u32_t user_sp; uint32_t user_sp;
u32_t kernel_sp; uint32_t kernel_sp;
#else #else
u32_t user_sp; uint32_t user_sp;
#endif #endif
#endif #endif
/* r28 is the stack pointer and saved separately */ /* r28 is the stack pointer and saved separately */
/* r29 is ILINK and does not need to be saved */ /* r29 is ILINK and does not need to be saved */
u32_t r30; uint32_t r30;
#ifdef CONFIG_ARC_HAS_ACCL_REGS #ifdef CONFIG_ARC_HAS_ACCL_REGS
u32_t r58; uint32_t r58;
u32_t r59; uint32_t r59;
#endif #endif
#ifdef CONFIG_FPU_SHARING #ifdef CONFIG_FPU_SHARING
u32_t fpu_status; uint32_t fpu_status;
u32_t fpu_ctrl; uint32_t fpu_ctrl;
#ifdef CONFIG_FP_FPU_DA #ifdef CONFIG_FP_FPU_DA
u32_t dpfp2h; uint32_t dpfp2h;
u32_t dpfp2l; uint32_t dpfp2l;
u32_t dpfp1h; uint32_t dpfp1h;
u32_t dpfp1l; uint32_t dpfp1l;
#endif #endif
#endif #endif

@ -48,7 +48,7 @@ static ALWAYS_INLINE void arch_kernel_init(void)
*/ */
static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void) static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
{ {
u32_t irq_num = z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE); uint32_t irq_num = z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
return irq_num; return irq_num;
} }
@ -62,7 +62,7 @@ extern void z_thread_entry_wrapper(void);
extern void z_user_thread_entry_wrapper(void); extern void z_user_thread_entry_wrapper(void);
extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1, extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
void *p2, void *p3, u32_t stack, u32_t size, void *p2, void *p3, uint32_t stack, uint32_t size,
struct k_thread *thread); struct k_thread *thread);

@ -36,11 +36,11 @@ extern "C" {
*/ */
static ALWAYS_INLINE void z_icache_setup(void) static ALWAYS_INLINE void z_icache_setup(void)
{ {
u32_t icache_config = ( uint32_t icache_config = (
IC_CACHE_DIRECT | /* direct mapping (one-way assoc.) */ IC_CACHE_DIRECT | /* direct mapping (one-way assoc.) */
IC_CACHE_ENABLE /* i-cache enabled */ IC_CACHE_ENABLE /* i-cache enabled */
); );
u32_t val; uint32_t val;
val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD); val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
val &= 0xff; val &= 0xff;

@ -53,7 +53,7 @@ extern "C" {
*/ */
static ALWAYS_INLINE void z_irq_setup(void) static ALWAYS_INLINE void z_irq_setup(void)
{ {
u32_t aux_irq_ctrl_value = ( uint32_t aux_irq_ctrl_value = (
_ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */ _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */
#ifdef CONFIG_CODE_DENSITY #ifdef CONFIG_CODE_DENSITY
_ARC_V2_AUX_IRQ_CTRL_LP | /* save code density registers */ _ARC_V2_AUX_IRQ_CTRL_LP | /* save code density registers */

@ -13,7 +13,7 @@ LOG_MODULE_DECLARE(os);
#define FAULT_DUMP_VERBOSE (CONFIG_FAULT_DUMP == 2) #define FAULT_DUMP_VERBOSE (CONFIG_FAULT_DUMP == 2)
#if FAULT_DUMP_VERBOSE #if FAULT_DUMP_VERBOSE
static const char *get_dbgdscr_moe_string(u32_t moe) static const char *get_dbgdscr_moe_string(uint32_t moe)
{ {
switch (moe) { switch (moe) {
case DBGDSCR_MOE_HALT_REQUEST: case DBGDSCR_MOE_HALT_REQUEST:
@ -40,14 +40,14 @@ static const char *get_dbgdscr_moe_string(u32_t moe)
static void dump_debug_event(void) static void dump_debug_event(void)
{ {
/* Read and parse debug mode of entry */ /* Read and parse debug mode of entry */
u32_t dbgdscr = __get_DBGDSCR(); uint32_t dbgdscr = __get_DBGDSCR();
u32_t moe = (dbgdscr & DBGDSCR_MOE_Msk) >> DBGDSCR_MOE_Pos; uint32_t moe = (dbgdscr & DBGDSCR_MOE_Msk) >> DBGDSCR_MOE_Pos;
/* Print debug event information */ /* Print debug event information */
LOG_ERR("Debug Event (%s)", get_dbgdscr_moe_string(moe)); LOG_ERR("Debug Event (%s)", get_dbgdscr_moe_string(moe));
} }
static void dump_fault(u32_t status, u32_t addr) static void dump_fault(uint32_t status, uint32_t addr)
{ {
/* /*
* Dump fault status and, if applicable, status-specific information. * Dump fault status and, if applicable, status-specific information.
@ -110,11 +110,11 @@ bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
bool z_arm_fault_prefetch(z_arch_esf_t *esf) bool z_arm_fault_prefetch(z_arch_esf_t *esf)
{ {
/* Read and parse Instruction Fault Status Register (IFSR) */ /* Read and parse Instruction Fault Status Register (IFSR) */
u32_t ifsr = __get_IFSR(); uint32_t ifsr = __get_IFSR();
u32_t fs = ((ifsr & IFSR_FS1_Msk) >> 6) | (ifsr & IFSR_FS0_Msk); uint32_t fs = ((ifsr & IFSR_FS1_Msk) >> 6) | (ifsr & IFSR_FS0_Msk);
/* Read Instruction Fault Address Register (IFAR) */ /* Read Instruction Fault Address Register (IFAR) */
u32_t ifar = __get_IFAR(); uint32_t ifar = __get_IFAR();
/* Print fault information */ /* Print fault information */
LOG_ERR("***** PREFETCH ABORT *****"); LOG_ERR("***** PREFETCH ABORT *****");
@ -137,11 +137,11 @@ bool z_arm_fault_prefetch(z_arch_esf_t *esf)
bool z_arm_fault_data(z_arch_esf_t *esf) bool z_arm_fault_data(z_arch_esf_t *esf)
{ {
/* Read and parse Data Fault Status Register (DFSR) */ /* Read and parse Data Fault Status Register (DFSR) */
u32_t dfsr = __get_DFSR(); uint32_t dfsr = __get_DFSR();
u32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6) | (dfsr & DFSR_FS0_Msk); uint32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6) | (dfsr & DFSR_FS0_Msk);
/* Read Data Fault Address Register (DFAR) */ /* Read Data Fault Address Register (DFAR) */
u32_t dfar = __get_DFAR(); uint32_t dfar = __get_DFAR();
/* Print fault information */ /* Print fault information */
LOG_ERR("***** DATA ABORT *****"); LOG_ERR("***** DATA ABORT *****");

@ -7,7 +7,7 @@
#include <zephyr.h> #include <zephyr.h>
#include <aarch32/cortex_m/cmse.h> #include <aarch32/cortex_m/cmse.h>
int arm_cmse_mpu_region_get(u32_t addr) int arm_cmse_mpu_region_get(uint32_t addr)
{ {
cmse_address_info_t addr_info = cmse_TT((void *)addr); cmse_address_info_t addr_info = cmse_TT((void *)addr);
@ -18,7 +18,7 @@ int arm_cmse_mpu_region_get(u32_t addr)
return -EINVAL; return -EINVAL;
} }
static int arm_cmse_addr_read_write_ok(u32_t addr, int force_npriv, int rw) static int arm_cmse_addr_read_write_ok(uint32_t addr, int force_npriv, int rw)
{ {
cmse_address_info_t addr_info; cmse_address_info_t addr_info;
if (force_npriv) { if (force_npriv) {
@ -30,17 +30,17 @@ static int arm_cmse_addr_read_write_ok(u32_t addr, int force_npriv, int rw)
return rw ? addr_info.flags.readwrite_ok : addr_info.flags.read_ok; return rw ? addr_info.flags.readwrite_ok : addr_info.flags.read_ok;
} }
int arm_cmse_addr_read_ok(u32_t addr, int force_npriv) int arm_cmse_addr_read_ok(uint32_t addr, int force_npriv)
{ {
return arm_cmse_addr_read_write_ok(addr, force_npriv, 0); return arm_cmse_addr_read_write_ok(addr, force_npriv, 0);
} }
int arm_cmse_addr_readwrite_ok(u32_t addr, int force_npriv) int arm_cmse_addr_readwrite_ok(uint32_t addr, int force_npriv)
{ {
return arm_cmse_addr_read_write_ok(addr, force_npriv, 1); return arm_cmse_addr_read_write_ok(addr, force_npriv, 1);
} }
static int arm_cmse_addr_range_read_write_ok(u32_t addr, u32_t size, static int arm_cmse_addr_range_read_write_ok(uint32_t addr, uint32_t size,
int force_npriv, int rw) int force_npriv, int rw)
{ {
int flags = 0; int flags = 0;
@ -60,19 +60,19 @@ static int arm_cmse_addr_range_read_write_ok(u32_t addr, u32_t size,
} }
} }
int arm_cmse_addr_range_read_ok(u32_t addr, u32_t size, int force_npriv) int arm_cmse_addr_range_read_ok(uint32_t addr, uint32_t size, int force_npriv)
{ {
return arm_cmse_addr_range_read_write_ok(addr, size, force_npriv, 0); return arm_cmse_addr_range_read_write_ok(addr, size, force_npriv, 0);
} }
int arm_cmse_addr_range_readwrite_ok(u32_t addr, u32_t size, int force_npriv) int arm_cmse_addr_range_readwrite_ok(uint32_t addr, uint32_t size, int force_npriv)
{ {
return arm_cmse_addr_range_read_write_ok(addr, size, force_npriv, 1); return arm_cmse_addr_range_read_write_ok(addr, size, force_npriv, 1);
} }
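A hedged usage sketch of the range checks defined above; service_copy_out() and its policy are hypothetical, only the arm_cmse_addr_range_readwrite_ok() call reflects the API shown here. Passing force_npriv = 1 evaluates the access as if the CPU were unprivileged.

#include <string.h>
#include <errno.h>

int service_copy_out(void *buf, uint32_t len, const uint8_t *src)
{
	if (!arm_cmse_addr_range_readwrite_ok((uint32_t)buf, len, 1)) {
		return -EPERM;  /* reject ranges the unprivileged caller may not write */
	}
	memcpy(buf, src, len);
	return 0;
}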
#if defined(CONFIG_ARM_SECURE_FIRMWARE) #if defined(CONFIG_ARM_SECURE_FIRMWARE)
int arm_cmse_mpu_nonsecure_region_get(u32_t addr) int arm_cmse_mpu_nonsecure_region_get(uint32_t addr)
{ {
cmse_address_info_t addr_info = cmse_TTA((void *)addr); cmse_address_info_t addr_info = cmse_TTA((void *)addr);
@ -83,7 +83,7 @@ int arm_cmse_mpu_nonsecure_region_get(u32_t addr)
return -EINVAL; return -EINVAL;
} }
int arm_cmse_sau_region_get(u32_t addr) int arm_cmse_sau_region_get(uint32_t addr)
{ {
cmse_address_info_t addr_info = cmse_TT((void *)addr); cmse_address_info_t addr_info = cmse_TT((void *)addr);
@ -94,7 +94,7 @@ int arm_cmse_sau_region_get(u32_t addr)
return -EINVAL; return -EINVAL;
} }
int arm_cmse_idau_region_get(u32_t addr) int arm_cmse_idau_region_get(uint32_t addr)
{ {
cmse_address_info_t addr_info = cmse_TT((void *)addr); cmse_address_info_t addr_info = cmse_TT((void *)addr);
@ -105,14 +105,14 @@ int arm_cmse_idau_region_get(u32_t addr)
return -EINVAL; return -EINVAL;
} }
int arm_cmse_addr_is_secure(u32_t addr) int arm_cmse_addr_is_secure(uint32_t addr)
{ {
cmse_address_info_t addr_info = cmse_TT((void *)addr); cmse_address_info_t addr_info = cmse_TT((void *)addr);
return addr_info.flags.secure; return addr_info.flags.secure;
} }
static int arm_cmse_addr_nonsecure_read_write_ok(u32_t addr, static int arm_cmse_addr_nonsecure_read_write_ok(uint32_t addr,
int force_npriv, int rw) int force_npriv, int rw)
{ {
cmse_address_info_t addr_info; cmse_address_info_t addr_info;
@ -126,17 +126,17 @@ static int arm_cmse_addr_nonsecure_read_write_ok(u32_t addr,
addr_info.flags.nonsecure_read_ok; addr_info.flags.nonsecure_read_ok;
} }
int arm_cmse_addr_nonsecure_read_ok(u32_t addr, int force_npriv) int arm_cmse_addr_nonsecure_read_ok(uint32_t addr, int force_npriv)
{ {
return arm_cmse_addr_nonsecure_read_write_ok(addr, force_npriv, 0); return arm_cmse_addr_nonsecure_read_write_ok(addr, force_npriv, 0);
} }
int arm_cmse_addr_nonsecure_readwrite_ok(u32_t addr, int force_npriv) int arm_cmse_addr_nonsecure_readwrite_ok(uint32_t addr, int force_npriv)
{ {
return arm_cmse_addr_nonsecure_read_write_ok(addr, force_npriv, 1); return arm_cmse_addr_nonsecure_read_write_ok(addr, force_npriv, 1);
} }
static int arm_cmse_addr_range_nonsecure_read_write_ok(u32_t addr, u32_t size, static int arm_cmse_addr_range_nonsecure_read_write_ok(uint32_t addr, uint32_t size,
int force_npriv, int rw) int force_npriv, int rw)
{ {
int flags = CMSE_NONSECURE; int flags = CMSE_NONSECURE;
@ -156,14 +156,14 @@ static int arm_cmse_addr_range_nonsecure_read_write_ok(u32_t addr, u32_t size,
} }
} }
int arm_cmse_addr_range_nonsecure_read_ok(u32_t addr, u32_t size, int arm_cmse_addr_range_nonsecure_read_ok(uint32_t addr, uint32_t size,
int force_npriv) int force_npriv)
{ {
return arm_cmse_addr_range_nonsecure_read_write_ok(addr, size, return arm_cmse_addr_range_nonsecure_read_write_ok(addr, size,
force_npriv, 0); force_npriv, 0);
} }
int arm_cmse_addr_range_nonsecure_readwrite_ok(u32_t addr, u32_t size, int arm_cmse_addr_range_nonsecure_readwrite_ok(uint32_t addr, uint32_t size,
int force_npriv) int force_npriv)
{ {
return arm_cmse_addr_range_nonsecure_read_write_ok(addr, size, return arm_cmse_addr_range_nonsecure_read_write_ok(addr, size,

@ -21,7 +21,7 @@ LOG_MODULE_DECLARE(os);
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG) #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
#define PR_EXC(...) LOG_ERR(__VA_ARGS__) #define PR_EXC(...) LOG_ERR(__VA_ARGS__)
#define STORE_xFAR(reg_var, reg) u32_t reg_var = (u32_t)reg #define STORE_xFAR(reg_var, reg) uint32_t reg_var = (uint32_t)reg
#else #else
#define PR_EXC(...) #define PR_EXC(...)
#define STORE_xFAR(reg_var, reg) #define STORE_xFAR(reg_var, reg)
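For illustration, a hypothetical expansion of the macro above (SCB->MMFAR is the CMSIS fault-address register, used here only as an example operand): with CONFIG_PRINTK or CONFIG_LOG enabled the macro declares and initializes the variable, otherwise it expands to nothing and the register read is compiled out entirely.

STORE_xFAR(mmfar, SCB->MMFAR);
/* logging on:  uint32_t mmfar = (uint32_t)SCB->MMFAR;      */
/* logging off: (empty -- no variable, no register access)  */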
@ -175,11 +175,11 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf)
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) { for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
/* Mask out instruction mode */ /* Mask out instruction mode */
u32_t start = (u32_t)exceptions[i].start & ~0x1; uint32_t start = (uint32_t)exceptions[i].start & ~0x1;
u32_t end = (u32_t)exceptions[i].end & ~0x1; uint32_t end = (uint32_t)exceptions[i].end & ~0x1;
if (esf->basic.pc >= start && esf->basic.pc < end) { if (esf->basic.pc >= start && esf->basic.pc < end) {
esf->basic.pc = (u32_t)(exceptions[i].fixup); esf->basic.pc = (uint32_t)(exceptions[i].fixup);
return true; return true;
} }
} }
@ -193,8 +193,8 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
u32_t z_check_thread_stack_fail(const u32_t fault_addr, uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
const u32_t psp); const uint32_t psp);
#endif /* CONFIG_MPU_STACK_GUARD || defined(CONFIG_USERSPACE) */ #endif /* CONFIG_MPU_STACK_GUARD || defined(CONFIG_USERSPACE) */
/** /**
@ -205,11 +205,11 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr,
* *
* @return error code to identify the fatal error reason * @return error code to identify the fatal error reason
*/ */
static u32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault, static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
bool *recoverable) bool *recoverable)
{ {
u32_t reason = K_ERR_CPU_EXCEPTION; uint32_t reason = K_ERR_CPU_EXCEPTION;
u32_t mmfar = -EINVAL; uint32_t mmfar = -EINVAL;
PR_FAULT_INFO("***** MPU FAULT *****"); PR_FAULT_INFO("***** MPU FAULT *****");
@ -276,8 +276,8 @@ static u32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
* handle the case of 'mmfar' holding the -EINVAL value. * handle the case of 'mmfar' holding the -EINVAL value.
*/ */
if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) { if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
u32_t min_stack_ptr = z_check_thread_stack_fail(mmfar, uint32_t min_stack_ptr = z_check_thread_stack_fail(mmfar,
((u32_t) &esf[0])); ((uint32_t) &esf[0]));
if (min_stack_ptr) { if (min_stack_ptr) {
/* When MemManage Stacking Error has occurred, /* When MemManage Stacking Error has occurred,
@ -339,7 +339,7 @@ static u32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
*/ */
static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable) static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
{ {
u32_t reason = K_ERR_CPU_EXCEPTION; uint32_t reason = K_ERR_CPU_EXCEPTION;
PR_FAULT_INFO("***** BUS FAULT *****"); PR_FAULT_INFO("***** BUS FAULT *****");
@ -383,10 +383,10 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
#endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */ #endif /* !defined(CONFIG_ARMV7_M_ARMV8_M_FP) */
#if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU) #if defined(CONFIG_ARM_MPU) && defined(CONFIG_CPU_HAS_NXP_MPU)
u32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK; uint32_t sperr = SYSMPU->CESR & SYSMPU_CESR_SPERR_MASK;
u32_t mask = BIT(31); uint32_t mask = BIT(31);
int i; int i;
u32_t ear = -EINVAL; uint32_t ear = -EINVAL;
if (sperr) { if (sperr) {
for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) { for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
@ -426,9 +426,9 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
* inspecting the RETTOBASE flag. * inspecting the RETTOBASE flag.
*/ */
if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) { if (SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) {
u32_t min_stack_ptr = uint32_t min_stack_ptr =
z_check_thread_stack_fail(ear, z_check_thread_stack_fail(ear,
((u32_t) &esf[0])); ((uint32_t) &esf[0]));
if (min_stack_ptr) { if (min_stack_ptr) {
/* When BusFault Stacking Error /* When BusFault Stacking Error
@ -491,9 +491,9 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
* *
* @return error code to identify the fatal error reason * @return error code to identify the fatal error reason
*/ */
static u32_t usage_fault(const z_arch_esf_t *esf) static uint32_t usage_fault(const z_arch_esf_t *esf)
{ {
u32_t reason = K_ERR_CPU_EXCEPTION; uint32_t reason = K_ERR_CPU_EXCEPTION;
PR_FAULT_INFO("***** USAGE FAULT *****"); PR_FAULT_INFO("***** USAGE FAULT *****");
@ -606,9 +606,9 @@ static void debug_monitor(const z_arch_esf_t *esf)
* *
* @return error code to identify the fatal error reason * @return error code to identify the fatal error reason
*/ */
static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable) static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
{ {
u32_t reason = K_ERR_CPU_EXCEPTION; uint32_t reason = K_ERR_CPU_EXCEPTION;
PR_FAULT_INFO("***** HARD FAULT *****"); PR_FAULT_INFO("***** HARD FAULT *****");
@ -621,14 +621,14 @@ static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
* priority. We handle the case of Kernel OOPS and Stack * priority. We handle the case of Kernel OOPS and Stack
* Fail here. * Fail here.
*/ */
u16_t *ret_addr = (u16_t *)esf->basic.pc; uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
/* SVC is a 16-bit instruction. On a synchronous SVC /* SVC is a 16-bit instruction. On a synchronous SVC
* escalated to Hard Fault, the return address is the * escalated to Hard Fault, the return address is the
* next instruction, i.e. after the SVC. * next instruction, i.e. after the SVC.
*/ */
#define _SVC_OPCODE 0xDF00 #define _SVC_OPCODE 0xDF00
u16_t fault_insn = *(ret_addr - 1); uint16_t fault_insn = *(ret_addr - 1);
if (((fault_insn & 0xff00) == _SVC_OPCODE) && if (((fault_insn & 0xff00) == _SVC_OPCODE) &&
((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) { ((fault_insn & 0x00ff) == _SVC_CALL_RUNTIME_EXCEPT)) {
@ -682,9 +682,9 @@ static void reserved_exception(const z_arch_esf_t *esf, int fault)
} }
/* Handler function for ARM fault conditions. */ /* Handler function for ARM fault conditions. */
static u32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable) static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
{ {
u32_t reason = K_ERR_CPU_EXCEPTION; uint32_t reason = K_ERR_CPU_EXCEPTION;
*recoverable = false; *recoverable = false;
@ -747,8 +747,8 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
* In case of a Non-Secure function call the top of the * In case of a Non-Secure function call the top of the
* stack contains the return address to Secure state. * stack contains the return address to Secure state.
*/ */
u32_t *top_of_sec_stack = (u32_t *)secure_esf; uint32_t *top_of_sec_stack = (uint32_t *)secure_esf;
u32_t sec_ret_addr; uint32_t sec_ret_addr;
#if defined(CONFIG_ARMV7_M_ARMV8_M_FP) #if defined(CONFIG_ARMV7_M_ARMV8_M_FP)
if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) || if ((*top_of_sec_stack == INTEGRITY_SIGNATURE_STD) ||
(*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) { (*top_of_sec_stack == INTEGRITY_SIGNATURE_EXT)) {
@ -790,7 +790,7 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
* *
* @return ESF pointer on success, otherwise return NULL * @return ESF pointer on success, otherwise return NULL
*/ */
static inline z_arch_esf_t *get_esf(u32_t msp, u32_t psp, u32_t exc_return, static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
bool *nested_exc) bool *nested_exc)
{ {
bool alternative_state_exc = false; bool alternative_state_exc = false;
@ -931,9 +931,9 @@ static inline z_arch_esf_t *get_esf(u32_t msp, u32_t psp, u32_t exc_return,
* @param exc_return EXC_RETURN value present in LR after exception entry. * @param exc_return EXC_RETURN value present in LR after exception entry.
* *
*/ */
void z_arm_fault(u32_t msp, u32_t psp, u32_t exc_return) void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return)
{ {
u32_t reason = K_ERR_CPU_EXCEPTION; uint32_t reason = K_ERR_CPU_EXCEPTION;
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk; int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
bool recoverable, nested_exc; bool recoverable, nested_exc;
z_arch_esf_t *esf; z_arch_esf_t *esf;

@ -39,11 +39,11 @@ LOG_MODULE_REGISTER(mpu);
* memory area, where dynamic memory regions may be programmed at run-time. * memory area, where dynamic memory regions may be programmed at run-time.
*/ */
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
#define _MPU_DYNAMIC_REGIONS_AREA_START ((u32_t)&_app_smem_start) #define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&_app_smem_start)
#else #else
#define _MPU_DYNAMIC_REGIONS_AREA_START ((u32_t)&__kernel_ram_start) #define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&__kernel_ram_start)
#endif /* CONFIG_USERSPACE */ #endif /* CONFIG_USERSPACE */
#define _MPU_DYNAMIC_REGIONS_AREA_SIZE ((u32_t)&__kernel_ram_end - \ #define _MPU_DYNAMIC_REGIONS_AREA_SIZE ((uint32_t)&__kernel_ram_end - \
_MPU_DYNAMIC_REGIONS_AREA_START) _MPU_DYNAMIC_REGIONS_AREA_START)
/** /**
@ -64,24 +64,24 @@ void z_arm_configure_static_mpu_regions(void)
#if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE) #if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
const struct k_mem_partition gcov_region = const struct k_mem_partition gcov_region =
{ {
.start = (u32_t)&__gcov_bss_start, .start = (uint32_t)&__gcov_bss_start,
.size = (u32_t)&__gcov_bss_size, .size = (uint32_t)&__gcov_bss_size,
.attr = K_MEM_PARTITION_P_RW_U_RW, .attr = K_MEM_PARTITION_P_RW_U_RW,
}; };
#endif /* CONFIG_COVERAGE_GCOV && CONFIG_USERSPACE */ #endif /* CONFIG_COVERAGE_GCOV && CONFIG_USERSPACE */
#if defined(CONFIG_NOCACHE_MEMORY) #if defined(CONFIG_NOCACHE_MEMORY)
const struct k_mem_partition nocache_region = const struct k_mem_partition nocache_region =
{ {
.start = (u32_t)&_nocache_ram_start, .start = (uint32_t)&_nocache_ram_start,
.size = (u32_t)&_nocache_ram_size, .size = (uint32_t)&_nocache_ram_size,
.attr = K_MEM_PARTITION_P_RW_U_NA_NOCACHE, .attr = K_MEM_PARTITION_P_RW_U_NA_NOCACHE,
}; };
#endif /* CONFIG_NOCACHE_MEMORY */ #endif /* CONFIG_NOCACHE_MEMORY */
#if defined(CONFIG_ARCH_HAS_RAMFUNC_SUPPORT) #if defined(CONFIG_ARCH_HAS_RAMFUNC_SUPPORT)
const struct k_mem_partition ramfunc_region = const struct k_mem_partition ramfunc_region =
{ {
.start = (u32_t)&_ramfunc_ram_start, .start = (uint32_t)&_ramfunc_ram_start,
.size = (u32_t)&_ramfunc_ram_size, .size = (uint32_t)&_ramfunc_ram_size,
.attr = K_MEM_PARTITION_P_RX_U_RX, .attr = K_MEM_PARTITION_P_RX_U_RX,
}; };
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */ #endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
@ -109,8 +109,8 @@ void z_arm_configure_static_mpu_regions(void)
*/ */
arm_core_mpu_configure_static_mpu_regions(static_regions, arm_core_mpu_configure_static_mpu_regions(static_regions,
ARRAY_SIZE(static_regions), ARRAY_SIZE(static_regions),
(u32_t)&_image_ram_start, (uint32_t)&_image_ram_start,
(u32_t)&__kernel_ram_end); (uint32_t)&__kernel_ram_end);
#if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
/* Define a constant array of k_mem_partition objects that holds the /* Define a constant array of k_mem_partition objects that holds the
@ -151,7 +151,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
*/ */
struct k_mem_partition *dynamic_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM]; struct k_mem_partition *dynamic_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM];
u8_t region_num = 0U; uint8_t region_num = 0U;
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
struct k_mem_partition thread_stack; struct k_mem_partition thread_stack;
@ -162,7 +162,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
if (mem_domain) { if (mem_domain) {
LOG_DBG("configure domain: %p", mem_domain); LOG_DBG("configure domain: %p", mem_domain);
u32_t num_partitions = mem_domain->num_partitions; uint32_t num_partitions = mem_domain->num_partitions;
struct k_mem_partition partition; struct k_mem_partition partition;
int i; int i;
@ -193,8 +193,8 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
/* Thread user stack */ /* Thread user stack */
LOG_DBG("configure user thread %p's context", thread); LOG_DBG("configure user thread %p's context", thread);
if (thread->arch.priv_stack_start) { if (thread->arch.priv_stack_start) {
u32_t base = (u32_t)thread->stack_obj; uint32_t base = (uint32_t)thread->stack_obj;
u32_t size = thread->stack_info.size + uint32_t size = thread->stack_info.size +
(thread->stack_info.start - base); (thread->stack_info.start - base);
__ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM, __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
@ -212,8 +212,8 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
struct k_mem_partition guard; struct k_mem_partition guard;
/* Privileged stack guard */ /* Privileged stack guard */
u32_t guard_start; uint32_t guard_start;
u32_t guard_size = MPU_GUARD_ALIGN_AND_SIZE; uint32_t guard_size = MPU_GUARD_ALIGN_AND_SIZE;
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
if ((thread->base.user_options & K_FP_REGS) != 0) { if ((thread->base.user_options & K_FP_REGS) != 0) {
@ -225,15 +225,15 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
if (thread->arch.priv_stack_start) { if (thread->arch.priv_stack_start) {
guard_start = thread->arch.priv_stack_start - guard_size; guard_start = thread->arch.priv_stack_start - guard_size;
__ASSERT((u32_t)&z_priv_stacks_ram_start <= guard_start, __ASSERT((uint32_t)&z_priv_stacks_ram_start <= guard_start,
"Guard start: (0x%x) below privilege stacks boundary: (0x%x)", "Guard start: (0x%x) below privilege stacks boundary: (0x%x)",
guard_start, (u32_t)&z_priv_stacks_ram_start); guard_start, (uint32_t)&z_priv_stacks_ram_start);
} else { } else {
guard_start = thread->stack_info.start - guard_size; guard_start = thread->stack_info.start - guard_size;
__ASSERT((u32_t)thread->stack_obj == guard_start, __ASSERT((uint32_t)thread->stack_obj == guard_start,
"Guard start (0x%x) not beginning at stack object (0x%x)\n", "Guard start (0x%x) not beginning at stack object (0x%x)\n",
guard_start, (u32_t)thread->stack_obj); guard_start, (uint32_t)thread->stack_obj);
} }
#else #else
guard_start = thread->stack_info.start - guard_size; guard_start = thread->stack_info.start - guard_size;
@ -318,7 +318,7 @@ void arch_mem_domain_destroy(struct k_mem_domain *domain)
} }
void arch_mem_domain_partition_remove(struct k_mem_domain *domain, void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id) uint32_t partition_id)
{ {
/* Request to remove a partition from a memory domain. /* Request to remove a partition from a memory domain.
* This resets the access permissions of the partition * This resets the access permissions of the partition
@ -335,7 +335,7 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
} }
void arch_mem_domain_partition_add(struct k_mem_domain *domain, void arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id) uint32_t partition_id)
{ {
/* No-op on this architecture */ /* No-op on this architecture */
} }

@ -133,8 +133,8 @@ struct k_thread;
* requirements of the MPU hardware. * requirements of the MPU hardware.
*/ */
void arm_core_mpu_configure_static_mpu_regions( void arm_core_mpu_configure_static_mpu_regions(
const struct k_mem_partition *static_regions[], const u8_t regions_num, const struct k_mem_partition *static_regions[], const uint8_t regions_num,
const u32_t background_area_start, const u32_t background_area_end); const uint32_t background_area_start, const uint32_t background_area_end);
#if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
@ -165,7 +165,7 @@ void arm_core_mpu_configure_static_mpu_regions(
*/ */
void arm_core_mpu_mark_areas_for_dynamic_regions( void arm_core_mpu_mark_areas_for_dynamic_regions(
const struct k_mem_partition dyn_region_areas[], const struct k_mem_partition dyn_region_areas[],
const u8_t dyn_region_areas_num); const uint8_t dyn_region_areas_num);
#endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */ #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */
@ -185,7 +185,7 @@ void arm_core_mpu_mark_areas_for_dynamic_regions(
* not exceed the number of (currently) available MPU indices. * not exceed the number of (currently) available MPU indices.
*/ */
void arm_core_mpu_configure_dynamic_mpu_regions( void arm_core_mpu_configure_dynamic_mpu_regions(
const struct k_mem_partition *dynamic_regions[], u8_t regions_num); const struct k_mem_partition *dynamic_regions[], uint8_t regions_num);
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
/** /**
@ -215,7 +215,7 @@ void arm_core_mpu_mem_partition_config_update(
* @param base base address in RAM * @param base base address in RAM
* @param size size of the region * @param size size of the region
*/ */
void arm_core_mpu_configure(u8_t type, u32_t base, u32_t size); void arm_core_mpu_configure(uint8_t type, uint32_t base, uint32_t size);
/** /**
* @brief configure MPU regions for the memory partitions of the memory domain * @brief configure MPU regions for the memory partitions of the memory domain
@ -237,7 +237,7 @@ void arm_core_mpu_configure_user_context(struct k_thread *thread);
* @param part_index memory partition index * @param part_index memory partition index
* @param part memory partition info * @param part memory partition info
*/ */
void arm_core_mpu_configure_mem_partition(u32_t part_index, void arm_core_mpu_configure_mem_partition(uint32_t part_index,
struct k_mem_partition *part); struct k_mem_partition *part);
/** /**
@ -245,7 +245,7 @@ void arm_core_mpu_configure_mem_partition(u32_t part_index,
* *
* @param part_index memory partition index * @param part_index memory partition index
*/ */
void arm_core_mpu_mem_partition_remove(u32_t part_index); void arm_core_mpu_mem_partition_remove(uint32_t part_index);
/** /**
* @brief Get the maximum number of available (free) MPU region indices * @brief Get the maximum number of available (free) MPU region indices

@ -36,12 +36,12 @@ LOG_MODULE_DECLARE(mpu);
* have been reserved by the MPU driver to program the static (fixed) memory * have been reserved by the MPU driver to program the static (fixed) memory
* regions. * regions.
*/ */
static u8_t static_regions_num; static uint8_t static_regions_num;
/** /**
* Get the number of supported MPU regions. * Get the number of supported MPU regions.
*/ */
static inline u8_t get_num_regions(void) static inline uint8_t get_num_regions(void)
{ {
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \ #if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
defined(CONFIG_CPU_CORTEX_M3) || \ defined(CONFIG_CPU_CORTEX_M3) || \
@ -55,11 +55,11 @@ static inline u8_t get_num_regions(void)
return NUM_MPU_REGIONS; return NUM_MPU_REGIONS;
#else #else
u32_t type = MPU->TYPE; uint32_t type = MPU->TYPE;
type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos; type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
return (u8_t)type; return (uint8_t)type;
#endif /* CPU_CORTEX_M0PLUS | CPU_CORTEX_M3 | CPU_CORTEX_M4 */ #endif /* CPU_CORTEX_M0PLUS | CPU_CORTEX_M3 | CPU_CORTEX_M4 */
} }
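As a worked example, assuming the architectural MPU_TYPE layout with DREGION in bits 15:8 (MPU_TYPE_DREGION_Pos == 8, MPU_TYPE_DREGION_Msk == 0xFF00): a core with eight MPU regions reports TYPE == 0x00000800, and the extraction above yields 8.

uint32_t type = 0x00000800;                                 /* DREGION field == 8 */
uint32_t n = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
/* (0x00000800 & 0x0000FF00) >> 8  ==  8 regions */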
@ -76,7 +76,7 @@ static inline u8_t get_num_regions(void)
#error "Unsupported ARM CPU" #error "Unsupported ARM CPU"
#endif #endif
static int region_allocate_and_init(const u8_t index, static int region_allocate_and_init(const uint8_t index,
const struct arm_mpu_region *region_conf) const struct arm_mpu_region *region_conf)
{ {
/* Attempt to allocate new region index. */ /* Attempt to allocate new region index. */
@ -98,7 +98,7 @@ static int region_allocate_and_init(const u8_t index,
/* This internal function programs an MPU region /* This internal function programs an MPU region
* of a given configuration at a given MPU index. * of a given configuration at a given MPU index.
*/ */
static int mpu_configure_region(const u8_t index, static int mpu_configure_region(const uint8_t index,
const struct k_mem_partition *new_region) const struct k_mem_partition *new_region)
{ {
struct arm_mpu_region region_conf; struct arm_mpu_region region_conf;
@ -122,7 +122,7 @@ static int mpu_configure_region(const u8_t index,
* sanity check of the memory regions to be programmed. * sanity check of the memory regions to be programmed.
*/ */
static int mpu_configure_regions(const struct k_mem_partition static int mpu_configure_regions(const struct k_mem_partition
*regions[], u8_t regions_num, u8_t start_reg_index, *regions[], uint8_t regions_num, uint8_t start_reg_index,
bool do_sanity_check) bool do_sanity_check)
{ {
int i; int i;
@ -192,21 +192,21 @@ void arm_core_mpu_mem_partition_config_update(
k_mem_partition_attr_t *new_attr) k_mem_partition_attr_t *new_attr)
{ {
/* Find the partition. ASSERT if not found. */ /* Find the partition. ASSERT if not found. */
u8_t i; uint8_t i;
u8_t reg_index = get_num_regions(); uint8_t reg_index = get_num_regions();
for (i = get_dyn_region_min_index(); i < get_num_regions(); i++) { for (i = get_dyn_region_min_index(); i < get_num_regions(); i++) {
if (!is_enabled_region(i)) { if (!is_enabled_region(i)) {
continue; continue;
} }
u32_t base = mpu_region_get_base(i); uint32_t base = mpu_region_get_base(i);
if (base != partition->start) { if (base != partition->start) {
continue; continue;
} }
u32_t size = mpu_region_get_size(i); uint32_t size = mpu_region_get_size(i);
if (size != partition->size) { if (size != partition->size) {
continue; continue;
@ -249,8 +249,8 @@ int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
* @brief configure fixed (static) MPU regions. * @brief configure fixed (static) MPU regions.
*/ */
void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const uint8_t regions_num,
const u32_t background_area_start, const u32_t background_area_end) const uint32_t background_area_start, const uint32_t background_area_end)
{ {
if (mpu_configure_static_mpu_regions(static_regions, regions_num, if (mpu_configure_static_mpu_regions(static_regions, regions_num,
background_area_start, background_area_end) == -EINVAL) { background_area_start, background_area_end) == -EINVAL) {
@ -266,7 +266,7 @@ void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
*/ */
void arm_core_mpu_mark_areas_for_dynamic_regions( void arm_core_mpu_mark_areas_for_dynamic_regions(
const struct k_mem_partition dyn_region_areas[], const struct k_mem_partition dyn_region_areas[],
const u8_t dyn_region_areas_num) const uint8_t dyn_region_areas_num)
{ {
if (mpu_mark_areas_for_dynamic_regions(dyn_region_areas, if (mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
dyn_region_areas_num) == -EINVAL) { dyn_region_areas_num) == -EINVAL) {
@ -281,7 +281,7 @@ void arm_core_mpu_mark_areas_for_dynamic_regions(
* @brief configure dynamic MPU regions. * @brief configure dynamic MPU regions.
*/ */
void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], uint8_t regions_num)
{ {
if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num) if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
== -EINVAL) { == -EINVAL) {
@ -301,7 +301,7 @@ void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*/ */
static int arm_mpu_init(struct device *arg) static int arm_mpu_init(struct device *arg)
{ {
u32_t r_index; uint32_t r_index;
if (mpu_config.num_regions > get_num_regions()) { if (mpu_config.num_regions > get_num_regions()) {
/* Attempt to configure more MPU regions than /* Attempt to configure more MPU regions than

@ -25,7 +25,7 @@ static void mpu_init(void)
* Note: * Note:
* The caller must provide a valid region index. * The caller must provide a valid region index.
*/ */
static void region_init(const u32_t index, static void region_init(const uint32_t index,
const struct arm_mpu_region *region_conf) const struct arm_mpu_region *region_conf)
{ {
/* Select the region you want to access */ /* Select the region you want to access */
@ -71,7 +71,7 @@ static int mpu_partition_is_valid(const struct k_mem_partition *part)
* power-of-two value, and the returned SIZE field value corresponds * power-of-two value, and the returned SIZE field value corresponds
* to that power-of-two value. * to that power-of-two value.
*/ */
static inline u32_t size_to_mpu_rasr_size(u32_t size) static inline uint32_t size_to_mpu_rasr_size(uint32_t size)
{ {
/* The minimal supported region size is 32 bytes */ /* The minimal supported region size is 32 bytes */
if (size <= 32U) { if (size <= 32U) {
@ -98,7 +98,7 @@ static inline u32_t size_to_mpu_rasr_size(u32_t size)
*/ */
static inline void get_region_attr_from_k_mem_partition_info( static inline void get_region_attr_from_k_mem_partition_info(
arm_mpu_region_attr_t *p_attr, arm_mpu_region_attr_t *p_attr,
const k_mem_partition_attr_t *attr, u32_t base, u32_t size) const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size)
{ {
/* in ARMv7-M MPU the base address is not required /* in ARMv7-M MPU the base address is not required
* to determine region attributes * to determine region attributes
@ -126,21 +126,21 @@ static inline int get_dyn_region_min_index(void)
* This internal function converts the SIZE field value of MPU_RASR * This internal function converts the SIZE field value of MPU_RASR
* to the region size (in bytes). * to the region size (in bytes).
*/ */
static inline u32_t mpu_rasr_size_to_size(u32_t rasr_size) static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size)
{ {
return 1 << (rasr_size + 1); return 1 << (rasr_size + 1);
} }
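A worked round-trip of the ARMv7-M SIZE encoding: a region spans 2^(SIZE+1) bytes, so the minimal 32-byte region carries SIZE == 4 and a 1 KiB region SIZE == 9. The rounding note is an assumption about the (elided) body of size_to_mpu_rasr_size().

uint32_t b32 = mpu_rasr_size_to_size(4);   /* 1 << (4 + 1) == 32 bytes (minimum) */
uint32_t kib = mpu_rasr_size_to_size(9);   /* 1 << (9 + 1) == 1024 bytes */
/* the inverse rounds up: a 96-byte request becomes a 128-byte
 * region, i.e. SIZE == 6, since 1 << (6 + 1) == 128.
 */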
static inline u32_t mpu_region_get_base(u32_t index) static inline uint32_t mpu_region_get_base(uint32_t index)
{ {
MPU->RNR = index; MPU->RNR = index;
return MPU->RBAR & MPU_RBAR_ADDR_Msk; return MPU->RBAR & MPU_RBAR_ADDR_Msk;
} }
static inline u32_t mpu_region_get_size(u32_t index) static inline uint32_t mpu_region_get_size(uint32_t index)
{ {
MPU->RNR = index; MPU->RNR = index;
u32_t rasr_size = (MPU->RASR & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos; uint32_t rasr_size = (MPU->RASR & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos;
return mpu_rasr_size_to_size(rasr_size); return mpu_rasr_size_to_size(rasr_size);
} }
@ -151,11 +151,11 @@ static inline u32_t mpu_region_get_size(u32_t index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_enabled_region(u32_t index) static inline int is_enabled_region(uint32_t index)
{ {
/* Lock IRQs to ensure RNR value is correct when reading RASR. */ /* Lock IRQs to ensure RNR value is correct when reading RASR. */
unsigned int key; unsigned int key;
u32_t rasr; uint32_t rasr;
key = irq_lock(); key = irq_lock();
MPU->RNR = index; MPU->RNR = index;
@ -177,11 +177,11 @@ static inline int is_enabled_region(u32_t index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline u32_t get_region_ap(u32_t r_index) static inline uint32_t get_region_ap(uint32_t r_index)
{ {
/* Lock IRQs to ensure RNR value is correct when reading RASR. */ /* Lock IRQs to ensure RNR value is correct when reading RASR. */
unsigned int key; unsigned int key;
u32_t rasr; uint32_t rasr;
key = irq_lock(); key = irq_lock();
MPU->RNR = r_index; MPU->RNR = r_index;
@ -197,16 +197,16 @@ static inline u32_t get_region_ap(u32_t r_index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_in_region(u32_t r_index, u32_t start, u32_t size) static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{ {
u32_t r_addr_start; uint32_t r_addr_start;
u32_t r_size_lshift; uint32_t r_size_lshift;
u32_t r_addr_end; uint32_t r_addr_end;
u32_t end; uint32_t end;
/* Lock IRQs to ensure RNR value is correct when reading RBAR, RASR. */ /* Lock IRQs to ensure RNR value is correct when reading RBAR, RASR. */
unsigned int key; unsigned int key;
u32_t rbar, rasr; uint32_t rbar, rasr;
key = irq_lock(); key = irq_lock();
MPU->RNR = r_index; MPU->RNR = r_index;
@ -237,9 +237,9 @@ static inline int is_in_region(u32_t r_index, u32_t start, u32_t size)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_user_accessible_region(u32_t r_index, int write) static inline int is_user_accessible_region(uint32_t r_index, int write)
{ {
u32_t r_ap = get_region_ap(r_index); uint32_t r_ap = get_region_ap(r_index);
if (write) { if (write) {
@ -255,12 +255,12 @@ static inline int is_user_accessible_region(u32_t r_index, int write)
*/ */
static inline int mpu_buffer_validate(void *addr, size_t size, int write) static inline int mpu_buffer_validate(void *addr, size_t size, int write)
{ {
s32_t r_index; int32_t r_index;
/* Iterate all mpu regions in reversed order */ /* Iterate all mpu regions in reversed order */
for (r_index = get_num_regions() - 1; r_index >= 0; r_index--) { for (r_index = get_num_regions() - 1; r_index >= 0; r_index--) {
if (!is_enabled_region(r_index) || if (!is_enabled_region(r_index) ||
!is_in_region(r_index, (u32_t)addr, size)) { !is_in_region(r_index, (uint32_t)addr, size)) {
continue; continue;
} }
@ -282,11 +282,11 @@ static inline int mpu_buffer_validate(void *addr, size_t size, int write)
#endif /* CONFIG_USERSPACE */ #endif /* CONFIG_USERSPACE */
static int mpu_configure_region(const u8_t index, static int mpu_configure_region(const uint8_t index,
const struct k_mem_partition *new_region); const struct k_mem_partition *new_region);
static int mpu_configure_regions(const struct k_mem_partition static int mpu_configure_regions(const struct k_mem_partition
*regions[], u8_t regions_num, u8_t start_reg_index, *regions[], uint8_t regions_num, uint8_t start_reg_index,
bool do_sanity_check); bool do_sanity_check);
/* This internal function programs the static MPU regions. /* This internal function programs the static MPU regions.
@ -298,9 +298,9 @@ static int mpu_configure_regions(const struct k_mem_partition
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int mpu_configure_static_mpu_regions(const struct k_mem_partition static int mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const uint8_t regions_num,
const u32_t background_area_base, const uint32_t background_area_base,
const u32_t background_area_end) const uint32_t background_area_end)
{ {
int mpu_reg_index = static_regions_num; int mpu_reg_index = static_regions_num;
@ -327,7 +327,7 @@ static int mpu_configure_static_mpu_regions(const struct k_mem_partition
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], uint8_t regions_num)
{ {
int mpu_reg_index = static_regions_num; int mpu_reg_index = static_regions_num;

@ -57,7 +57,7 @@ static void mpu_init(void)
* Note: * Note:
* The caller must provide a valid region index. * The caller must provide a valid region index.
*/ */
static void region_init(const u32_t index, static void region_init(const uint32_t index,
const struct arm_mpu_region *region_conf) const struct arm_mpu_region *region_conf)
{ {
ARM_MPU_SetRegion( ARM_MPU_SetRegion(
@ -116,10 +116,10 @@ static int mpu_partition_is_valid(const struct k_mem_partition *part)
* needs to be enabled. * needs to be enabled.
* *
*/ */
static inline int get_region_index(u32_t start, u32_t size) static inline int get_region_index(uint32_t start, uint32_t size)
{ {
u32_t region_start_addr = arm_cmse_mpu_region_get(start); uint32_t region_start_addr = arm_cmse_mpu_region_get(start);
u32_t region_end_addr = arm_cmse_mpu_region_get(start + size - 1); uint32_t region_end_addr = arm_cmse_mpu_region_get(start + size - 1);
/* MPU regions are contiguous, so return the region number /* MPU regions are contiguous, so return the region number
* if both the start and end addresses are in the same region. * if both the start and end addresses are in the same region.
@ -130,33 +130,33 @@ static inline int get_region_index(u32_t start, u32_t size)
return -EINVAL; return -EINVAL;
} }
static inline u32_t mpu_region_get_base(const u32_t index) static inline uint32_t mpu_region_get_base(const uint32_t index)
{ {
MPU->RNR = index; MPU->RNR = index;
return MPU->RBAR & MPU_RBAR_BASE_Msk; return MPU->RBAR & MPU_RBAR_BASE_Msk;
} }
static inline void mpu_region_set_base(const u32_t index, const u32_t base) static inline void mpu_region_set_base(const uint32_t index, const uint32_t base)
{ {
MPU->RNR = index; MPU->RNR = index;
MPU->RBAR = (MPU->RBAR & (~MPU_RBAR_BASE_Msk)) MPU->RBAR = (MPU->RBAR & (~MPU_RBAR_BASE_Msk))
| (base & MPU_RBAR_BASE_Msk); | (base & MPU_RBAR_BASE_Msk);
} }
static inline u32_t mpu_region_get_last_addr(const u32_t index) static inline uint32_t mpu_region_get_last_addr(const uint32_t index)
{ {
MPU->RNR = index; MPU->RNR = index;
return (MPU->RLAR & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk); return (MPU->RLAR & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk);
} }
static inline void mpu_region_set_limit(const u32_t index, const u32_t limit) static inline void mpu_region_set_limit(const uint32_t index, const uint32_t limit)
{ {
MPU->RNR = index; MPU->RNR = index;
MPU->RLAR = (MPU->RLAR & (~MPU_RLAR_LIMIT_Msk)) MPU->RLAR = (MPU->RLAR & (~MPU_RLAR_LIMIT_Msk))
| (limit & MPU_RLAR_LIMIT_Msk); | (limit & MPU_RLAR_LIMIT_Msk);
} }
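A worked example of the ARMv8-M limit encoding handled above, assuming MPU_RLAR_LIMIT_Msk covers bits 31:5 (so ~MPU_RLAR_LIMIT_Msk == 0x1F): the LIMIT field stores the last byte address with its low five bits dropped, and the read path ORs them back in. The region index is hypothetical.

const uint32_t idx = 3;                         /* hypothetical region index */
mpu_region_set_limit(idx, 0x20000FFF);          /* LIMIT bits <- 0x20000FE0 */
uint32_t last = mpu_region_get_last_addr(idx);  /* 0x20000FE0 | 0x1F == 0x20000FFF */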
static inline void mpu_region_get_access_attr(const u32_t index, static inline void mpu_region_get_access_attr(const uint32_t index,
arm_mpu_region_attr_t *attr) arm_mpu_region_attr_t *attr)
{ {
MPU->RNR = index; MPU->RNR = index;
@ -167,7 +167,7 @@ static inline void mpu_region_get_access_attr(const u32_t index,
MPU_RLAR_AttrIndx_Pos; MPU_RLAR_AttrIndx_Pos;
} }
static inline void mpu_region_get_conf(const u32_t index, static inline void mpu_region_get_conf(const uint32_t index,
struct arm_mpu_region *region_conf) struct arm_mpu_region *region_conf)
{ {
MPU->RNR = index; MPU->RNR = index;
@ -193,7 +193,7 @@ static inline void mpu_region_get_conf(const u32_t index,
*/ */
static inline void get_region_attr_from_k_mem_partition_info( static inline void get_region_attr_from_k_mem_partition_info(
arm_mpu_region_attr_t *p_attr, arm_mpu_region_attr_t *p_attr,
const k_mem_partition_attr_t *attr, u32_t base, u32_t size) const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size)
{ {
p_attr->rbar = attr->rbar & p_attr->rbar = attr->rbar &
(MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk); (MPU_RBAR_XN_Msk | MPU_RBAR_AP_Msk | MPU_RBAR_SH_Msk);
@ -228,7 +228,7 @@ static inline int get_dyn_region_min_index(void)
return dyn_reg_min_index; return dyn_reg_min_index;
} }
static inline u32_t mpu_region_get_size(u32_t index) static inline uint32_t mpu_region_get_size(uint32_t index)
{ {
return mpu_region_get_last_addr(index) + 1 return mpu_region_get_last_addr(index) + 1
- mpu_region_get_base(index); - mpu_region_get_base(index);
@ -240,7 +240,7 @@ static inline u32_t mpu_region_get_size(u32_t index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_enabled_region(u32_t index) static inline int is_enabled_region(uint32_t index)
{ {
MPU->RNR = index; MPU->RNR = index;
@ -272,8 +272,8 @@ static inline int is_enabled_region(u32_t index)
*/ */
static inline int mpu_buffer_validate(void *addr, size_t size, int write) static inline int mpu_buffer_validate(void *addr, size_t size, int write)
{ {
u32_t _addr = (u32_t)addr; uint32_t _addr = (uint32_t)addr;
u32_t _size = (u32_t)size; uint32_t _size = (uint32_t)size;
if (write) { if (write) {
if (arm_cmse_addr_range_readwrite_ok(_addr, _size, 1)) { if (arm_cmse_addr_range_readwrite_ok(_addr, _size, 1)) {
@ -290,8 +290,8 @@ static inline int mpu_buffer_validate(void *addr, size_t size, int write)
* Validation failure may be due to SAU/IDAU presence. * Validation failure may be due to SAU/IDAU presence.
* We re-check user accessibility based on MPU only. * We re-check user accessibility based on MPU only.
*/ */
s32_t r_index_base = arm_cmse_mpu_region_get(_addr); int32_t r_index_base = arm_cmse_mpu_region_get(_addr);
s32_t r_index_last = arm_cmse_mpu_region_get(_addr + _size - 1); int32_t r_index_last = arm_cmse_mpu_region_get(_addr + _size - 1);
if ((r_index_base != -EINVAL) && (r_index_base == r_index_last)) { if ((r_index_base != -EINVAL) && (r_index_base == r_index_last)) {
/* Valid MPU region, check permissions on base address only. */ /* Valid MPU region, check permissions on base address only. */
@ -312,15 +312,15 @@ static inline int mpu_buffer_validate(void *addr, size_t size, int write)
#endif /* CONFIG_USERSPACE */ #endif /* CONFIG_USERSPACE */
static int region_allocate_and_init(const u8_t index, static int region_allocate_and_init(const uint8_t index,
const struct arm_mpu_region *region_conf); const struct arm_mpu_region *region_conf);
static int mpu_configure_region(const u8_t index, static int mpu_configure_region(const uint8_t index,
const struct k_mem_partition *new_region); const struct k_mem_partition *new_region);
#if !defined(CONFIG_MPU_GAP_FILLING) #if !defined(CONFIG_MPU_GAP_FILLING)
static int mpu_configure_regions(const struct k_mem_partition static int mpu_configure_regions(const struct k_mem_partition
*regions[], u8_t regions_num, u8_t start_reg_index, *regions[], uint8_t regions_num, uint8_t start_reg_index,
bool do_sanity_check); bool do_sanity_check);
#endif #endif
@ -332,7 +332,7 @@ static int mpu_configure_regions(const struct k_mem_partition
* area, effectively leaving no space in this area uncovered by the MPU. * area, effectively leaving no space in this area uncovered by the MPU.
*/ */
static int mpu_configure_regions_and_partition(const struct k_mem_partition static int mpu_configure_regions_and_partition(const struct k_mem_partition
*regions[], u8_t regions_num, u8_t start_reg_index, *regions[], uint8_t regions_num, uint8_t start_reg_index,
bool do_sanity_check) bool do_sanity_check)
{ {
int i; int i;
@ -367,9 +367,9 @@ static int mpu_configure_regions_and_partition(const struct k_mem_partition
* The new memory region is to be placed inside the underlying * The new memory region is to be placed inside the underlying
* region, possibly splitting the underlying region into two. * region, possibly splitting the underlying region into two.
*/ */
u32_t u_reg_base = mpu_region_get_base(u_reg_index); uint32_t u_reg_base = mpu_region_get_base(u_reg_index);
u32_t u_reg_last = mpu_region_get_last_addr(u_reg_index); uint32_t u_reg_last = mpu_region_get_last_addr(u_reg_index);
u32_t reg_last = regions[i]->start + regions[i]->size - 1; uint32_t reg_last = regions[i]->start + regions[i]->size - 1;
if ((regions[i]->start == u_reg_base) && if ((regions[i]->start == u_reg_base) &&
(reg_last == u_reg_last)) { (reg_last == u_reg_last)) {
@ -468,9 +468,9 @@ static int mpu_configure_regions_and_partition(const struct k_mem_partition
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int mpu_configure_static_mpu_regions(const struct k_mem_partition static int mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const uint8_t regions_num,
const u32_t background_area_base, const uint32_t background_area_base,
const u32_t background_area_end) const uint32_t background_area_end)
{ {
int mpu_reg_index = static_regions_num; int mpu_reg_index = static_regions_num;
@ -495,7 +495,7 @@ static int mpu_configure_static_mpu_regions(const struct k_mem_partition
*/ */
static int mpu_mark_areas_for_dynamic_regions( static int mpu_mark_areas_for_dynamic_regions(
const struct k_mem_partition dyn_region_areas[], const struct k_mem_partition dyn_region_areas[],
const u8_t dyn_region_areas_num) const uint8_t dyn_region_areas_num)
{ {
/* In the ARMv8-M architecture we need to store the index values /* In the ARMv8-M architecture we need to store the index values
* and the default configuration of the MPU regions, inside * and the default configuration of the MPU regions, inside
@ -539,7 +539,7 @@ static int mpu_mark_areas_for_dynamic_regions(
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], uint8_t regions_num)
{ {
int mpu_reg_index = static_regions_num; int mpu_reg_index = static_regions_num;
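
A minimal sketch of the check the arm_cmse_addr_range_*_ok() helpers above perform, written directly against the ACLE CMSE intrinsics (requires a toolchain invoked with -mcmse; the helper name and the 0/-EPERM convention here are illustrative, not the driver's API):

    #include <arm_cmse.h>
    #include <errno.h>
    #include <stddef.h>

    static int buffer_validate_sketch(void *addr, size_t size, int write)
    {
        int flags = CMSE_MPU_UNPRIV |
                (write ? CMSE_MPU_READWRITE : CMSE_MPU_READ);

        /* cmse_check_address_range() returns addr when the whole range
         * passes the permission test, NULL otherwise.
         */
        return (cmse_check_address_range(addr, size, flags) != NULL)
                ? 0 : -EPERM;
    }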


@ -22,7 +22,7 @@ LOG_MODULE_DECLARE(mpu);
* have been reserved by the MPU driver to program the static (fixed) memory * have been reserved by the MPU driver to program the static (fixed) memory
* regions. * regions.
*/ */
static u8_t static_regions_num; static uint8_t static_regions_num;
/* Global MPU configuration at system initialization. */ /* Global MPU configuration at system initialization. */
static void mpu_init(void) static void mpu_init(void)
@ -34,7 +34,7 @@ static void mpu_init(void)
/** /**
* Get the number of supported MPU regions. * Get the number of supported MPU regions.
*/ */
static inline u8_t get_num_regions(void) static inline uint8_t get_num_regions(void)
{ {
return FSL_FEATURE_SYSMPU_DESCRIPTOR_COUNT; return FSL_FEATURE_SYSMPU_DESCRIPTOR_COUNT;
} }
@ -71,12 +71,12 @@ static int mpu_partition_is_valid(const struct k_mem_partition *part)
* Note: * Note:
* The caller must provide a valid region index. * The caller must provide a valid region index.
*/ */
static void region_init(const u32_t index, static void region_init(const uint32_t index,
const struct nxp_mpu_region *region_conf) const struct nxp_mpu_region *region_conf)
{ {
u32_t region_base = region_conf->base; uint32_t region_base = region_conf->base;
u32_t region_end = region_conf->end; uint32_t region_end = region_conf->end;
u32_t region_attr = region_conf->attr.attr; uint32_t region_attr = region_conf->attr.attr;
if (index == 0U) { if (index == 0U) {
/* The MPU does not allow writes from the core to affect the /* The MPU does not allow writes from the core to affect the
@ -88,11 +88,11 @@ static void region_init(const u32_t index,
*/ */
__ASSERT(region_base == SYSMPU->WORD[index][0], __ASSERT(region_base == SYSMPU->WORD[index][0],
"Region %d base address got 0x%08x expected 0x%08x", "Region %d base address got 0x%08x expected 0x%08x",
index, region_base, (u32_t)SYSMPU->WORD[index][0]); index, region_base, (uint32_t)SYSMPU->WORD[index][0]);
__ASSERT(region_end == SYSMPU->WORD[index][1], __ASSERT(region_end == SYSMPU->WORD[index][1],
"Region %d end address got 0x%08x expected 0x%08x", "Region %d end address got 0x%08x expected 0x%08x",
index, region_end, (u32_t)SYSMPU->WORD[index][1]); index, region_end, (uint32_t)SYSMPU->WORD[index][1]);
/* Changes to the RGD0_WORD2 alterable fields should be done /* Changes to the RGD0_WORD2 alterable fields should be done
* via a write to RGDAAC0. * via a write to RGDAAC0.
@ -107,14 +107,14 @@ static void region_init(const u32_t index,
} }
LOG_DBG("[%d] 0x%08x 0x%08x 0x%08x 0x%08x", index, LOG_DBG("[%d] 0x%08x 0x%08x 0x%08x 0x%08x", index,
(u32_t)SYSMPU->WORD[index][0], (uint32_t)SYSMPU->WORD[index][0],
(u32_t)SYSMPU->WORD[index][1], (uint32_t)SYSMPU->WORD[index][1],
(u32_t)SYSMPU->WORD[index][2], (uint32_t)SYSMPU->WORD[index][2],
(u32_t)SYSMPU->WORD[index][3]); (uint32_t)SYSMPU->WORD[index][3]);
} }
static int region_allocate_and_init(const u8_t index, static int region_allocate_and_init(const uint8_t index,
const struct nxp_mpu_region *region_conf) const struct nxp_mpu_region *region_conf)
{ {
/* Attempt to allocate new region index. */ /* Attempt to allocate new region index. */
@ -140,7 +140,7 @@ static int region_allocate_and_init(const u8_t index,
*/ */
static inline void get_region_attr_from_k_mem_partition_info( static inline void get_region_attr_from_k_mem_partition_info(
nxp_mpu_region_attr_t *p_attr, nxp_mpu_region_attr_t *p_attr,
const k_mem_partition_attr_t *attr, u32_t base, u32_t size) const k_mem_partition_attr_t *attr, uint32_t base, uint32_t size)
{ {
/* in the NXP MPU the base address and size are not required /* in the NXP MPU the base address and size are not required
* to determine region attributes * to determine region attributes
@ -154,7 +154,7 @@ static inline void get_region_attr_from_k_mem_partition_info(
/* This internal function programs an MPU region /* This internal function programs an MPU region
* of a given configuration at a given MPU index. * of a given configuration at a given MPU index.
*/ */
static int mpu_configure_region(const u8_t index, static int mpu_configure_region(const uint8_t index,
const struct k_mem_partition *new_region) const struct k_mem_partition *new_region)
{ {
struct nxp_mpu_region region_conf; struct nxp_mpu_region region_conf;
@ -174,7 +174,7 @@ static int mpu_configure_region(const u8_t index,
#if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_MPU_STACK_GUARD)
/* This internal function partitions the SRAM MPU region */ /* This internal function partitions the SRAM MPU region */
static int mpu_sram_partitioning(u8_t index, static int mpu_sram_partitioning(uint8_t index,
const struct k_mem_partition *p_region) const struct k_mem_partition *p_region)
{ {
/* /*
@ -234,7 +234,7 @@ static int mpu_sram_partitioning(u8_t index,
* sanity check of the memory regions to be programmed. * sanity check of the memory regions to be programmed.
*/ */
static int mpu_configure_regions(const struct k_mem_partition static int mpu_configure_regions(const struct k_mem_partition
*regions[], u8_t regions_num, u8_t start_reg_index, *regions[], uint8_t regions_num, uint8_t start_reg_index,
bool do_sanity_check) bool do_sanity_check)
{ {
int i; int i;
@ -294,9 +294,9 @@ static int mpu_configure_regions(const struct k_mem_partition
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int mpu_configure_static_mpu_regions(const struct k_mem_partition static int mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const uint8_t regions_num,
const u32_t background_area_base, const uint32_t background_area_base,
const u32_t background_area_end) const uint32_t background_area_end)
{ {
int mpu_reg_index = static_regions_num; int mpu_reg_index = static_regions_num;
@ -323,7 +323,7 @@ static int mpu_configure_static_mpu_regions(const struct k_mem_partition
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], uint8_t regions_num)
{ {
unsigned int key; unsigned int key;
@ -397,12 +397,12 @@ void arm_core_mpu_disable(void)
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
static inline u32_t mpu_region_get_base(u32_t r_index) static inline uint32_t mpu_region_get_base(uint32_t r_index)
{ {
return SYSMPU->WORD[r_index][0]; return SYSMPU->WORD[r_index][0];
} }
static inline u32_t mpu_region_get_size(u32_t r_index) static inline uint32_t mpu_region_get_size(uint32_t r_index)
{ {
/* <END> + 1 - <BASE> */ /* <END> + 1 - <BASE> */
return (SYSMPU->WORD[r_index][1] + 1) - SYSMPU->WORD[r_index][0]; return (SYSMPU->WORD[r_index][1] + 1) - SYSMPU->WORD[r_index][0];
@ -414,7 +414,7 @@ static inline u32_t mpu_region_get_size(u32_t r_index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_enabled_region(u32_t r_index) static inline int is_enabled_region(uint32_t r_index)
{ {
return SYSMPU->WORD[r_index][3] & SYSMPU_WORD_VLD_MASK; return SYSMPU->WORD[r_index][3] & SYSMPU_WORD_VLD_MASK;
} }
@ -425,11 +425,11 @@ static inline int is_enabled_region(u32_t r_index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_in_region(u32_t r_index, u32_t start, u32_t size) static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{ {
u32_t r_addr_start; uint32_t r_addr_start;
u32_t r_addr_end; uint32_t r_addr_end;
u32_t end; uint32_t end;
r_addr_start = SYSMPU->WORD[r_index][0]; r_addr_start = SYSMPU->WORD[r_index][0];
r_addr_end = SYSMPU->WORD[r_index][1]; r_addr_end = SYSMPU->WORD[r_index][1];
@ -454,21 +454,21 @@ void arm_core_mpu_mem_partition_config_update(
k_mem_partition_attr_t *new_attr) k_mem_partition_attr_t *new_attr)
{ {
/* Find the partition. ASSERT if not found. */ /* Find the partition. ASSERT if not found. */
u8_t i; uint8_t i;
u8_t reg_index = get_num_regions(); uint8_t reg_index = get_num_regions();
for (i = static_regions_num; i < get_num_regions(); i++) { for (i = static_regions_num; i < get_num_regions(); i++) {
if (!is_enabled_region(i)) { if (!is_enabled_region(i)) {
continue; continue;
} }
u32_t base = mpu_region_get_base(i); uint32_t base = mpu_region_get_base(i);
if (base != partition->start) { if (base != partition->start) {
continue; continue;
} }
u32_t size = mpu_region_get_size(i); uint32_t size = mpu_region_get_size(i);
if (size != partition->size) { if (size != partition->size) {
continue; continue;
@ -501,9 +501,9 @@ int arm_core_mpu_get_max_available_dyn_regions(void)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int is_user_accessible_region(u32_t r_index, int write) static inline int is_user_accessible_region(uint32_t r_index, int write)
{ {
u32_t r_ap = SYSMPU->WORD[r_index][2]; uint32_t r_ap = SYSMPU->WORD[r_index][2];
if (write) { if (write) {
return (r_ap & MPU_REGION_WRITE) == MPU_REGION_WRITE; return (r_ap & MPU_REGION_WRITE) == MPU_REGION_WRITE;
@ -517,12 +517,12 @@ static inline int is_user_accessible_region(u32_t r_index, int write)
*/ */
int arm_core_mpu_buffer_validate(void *addr, size_t size, int write) int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
{ {
u8_t r_index; uint8_t r_index;
/* Iterate through all MPU regions */ /* Iterate through all MPU regions */
for (r_index = 0U; r_index < get_num_regions(); r_index++) { for (r_index = 0U; r_index < get_num_regions(); r_index++) {
if (!is_enabled_region(r_index) || if (!is_enabled_region(r_index) ||
!is_in_region(r_index, (u32_t)addr, size)) { !is_in_region(r_index, (uint32_t)addr, size)) {
continue; continue;
} }
@ -545,8 +545,8 @@ int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
* @brief configure fixed (static) MPU regions. * @brief configure fixed (static) MPU regions.
*/ */
void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const uint8_t regions_num,
const u32_t background_area_start, const u32_t background_area_end) const uint32_t background_area_start, const uint32_t background_area_end)
{ {
if (mpu_configure_static_mpu_regions(static_regions, regions_num, if (mpu_configure_static_mpu_regions(static_regions, regions_num,
background_area_start, background_area_end) == -EINVAL) { background_area_start, background_area_end) == -EINVAL) {
@ -560,7 +560,7 @@ void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
* @brief configure dynamic MPU regions. * @brief configure dynamic MPU regions.
*/ */
void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], uint8_t regions_num)
{ {
if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num) if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
== -EINVAL) { == -EINVAL) {
@ -582,7 +582,7 @@ static int nxp_mpu_init(struct device *arg)
{ {
ARG_UNUSED(arg); ARG_UNUSED(arg);
u32_t r_index; uint32_t r_index;
if (mpu_config.num_regions > get_num_regions()) { if (mpu_config.num_regions > get_num_regions()) {
/* Attempt to configure more MPU regions than /* Attempt to configure more MPU regions than
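
The `<END> + 1 - <BASE>` arithmetic in mpu_region_get_size() above follows from the SYSMPU storing an inclusive end address in WORD[n][1]. A standalone illustration with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t base = 0x20000000u;  /* as read from WORD[n][0] */
        uint32_t end  = 0x20000fffu;  /* as read from WORD[n][1], inclusive */
        uint32_t size = (end + 1u) - base;

        printf("region size: %u bytes\n", size);  /* 4096 */
        return 0;
    }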


@ -8,24 +8,24 @@
#include <aarch32/cortex_m/tz.h> #include <aarch32/cortex_m/tz.h>
#include <aarch32/cortex_m/exc.h> #include <aarch32/cortex_m/exc.h>
static void configure_nonsecure_vtor_offset(u32_t vtor_ns) static void configure_nonsecure_vtor_offset(uint32_t vtor_ns)
{ {
SCB_NS->VTOR = vtor_ns; SCB_NS->VTOR = vtor_ns;
} }
static void configure_nonsecure_msp(u32_t msp_ns) static void configure_nonsecure_msp(uint32_t msp_ns)
{ {
__TZ_set_MSP_NS(msp_ns); __TZ_set_MSP_NS(msp_ns);
} }
static void configure_nonsecure_psp(u32_t psp_ns) static void configure_nonsecure_psp(uint32_t psp_ns)
{ {
__TZ_set_PSP_NS(psp_ns); __TZ_set_PSP_NS(psp_ns);
} }
static void configure_nonsecure_control(u32_t spsel_ns, u32_t npriv_ns) static void configure_nonsecure_control(uint32_t spsel_ns, uint32_t npriv_ns)
{ {
u32_t control_ns = __TZ_get_CONTROL_NS(); uint32_t control_ns = __TZ_get_CONTROL_NS();
/* Only nPRIV and SPSEL bits are banked between security states. */ /* Only nPRIV and SPSEL bits are banked between security states. */
control_ns &= ~(CONTROL_SPSEL_Msk | CONTROL_nPRIV_Msk); control_ns &= ~(CONTROL_SPSEL_Msk | CONTROL_nPRIV_Msk);
@ -46,12 +46,12 @@ static void configure_nonsecure_control(u32_t spsel_ns, u32_t npriv_ns)
* Stack Pointer Limit registers. * Stack Pointer Limit registers.
*/ */
void tz_nonsecure_msplim_set(u32_t val) void tz_nonsecure_msplim_set(uint32_t val)
{ {
__TZ_set_MSPLIM_NS(val); __TZ_set_MSPLIM_NS(val);
} }
void tz_nonsecure_psplim_set(u32_t val) void tz_nonsecure_psplim_set(uint32_t val)
{ {
__TZ_set_PSPLIM_NS(val); __TZ_set_PSPLIM_NS(val);
} }
@ -71,7 +71,7 @@ void tz_nonsecure_state_setup(const tz_nonsecure_setup_conf_t *p_ns_conf)
void tz_nbanked_exception_target_state_set(int secure_state) void tz_nbanked_exception_target_state_set(int secure_state)
{ {
u32_t aircr_payload = SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk)); uint32_t aircr_payload = SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk));
if (secure_state) { if (secure_state) {
aircr_payload &= ~(SCB_AIRCR_BFHFNMINS_Msk); aircr_payload &= ~(SCB_AIRCR_BFHFNMINS_Msk);
} else { } else {
@ -84,7 +84,7 @@ void tz_nbanked_exception_target_state_set(int secure_state)
void tz_nonsecure_exception_prio_config(int secure_boost) void tz_nonsecure_exception_prio_config(int secure_boost)
{ {
u32_t aircr_payload = SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk)); uint32_t aircr_payload = SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk));
if (secure_boost) { if (secure_boost) {
aircr_payload |= SCB_AIRCR_PRIS_Msk; aircr_payload |= SCB_AIRCR_PRIS_Msk;
} else { } else {
@ -97,7 +97,7 @@ void tz_nonsecure_exception_prio_config(int secure_boost)
void tz_nonsecure_system_reset_req_block(int block) void tz_nonsecure_system_reset_req_block(int block)
{ {
u32_t aircr_payload = SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk)); uint32_t aircr_payload = SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk));
if (block) { if (block) {
aircr_payload |= SCB_AIRCR_SYSRESETREQS_Msk; aircr_payload |= SCB_AIRCR_SYSRESETREQS_Msk;
} else { } else {
@ -130,7 +130,7 @@ void tz_sau_configure(int enable, int allns)
} }
} }
u32_t tz_sau_number_of_regions_get(void) uint32_t tz_sau_number_of_regions_get(void)
{ {
return SAU->TYPE & SAU_TYPE_SREGION_Msk; return SAU->TYPE & SAU_TYPE_SREGION_Msk;
} }
@ -139,7 +139,7 @@ u32_t tz_sau_number_of_regions_get(void)
#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) #if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U)
int tz_sau_region_configure_enable(tz_sau_conf_t *p_sau_conf) int tz_sau_region_configure_enable(tz_sau_conf_t *p_sau_conf)
{ {
u32_t regions = tz_sau_number_of_regions_get(); uint32_t regions = tz_sau_number_of_regions_get();
if ((p_sau_conf->region_num == 0) || if ((p_sau_conf->region_num == 0) ||
(p_sau_conf->region_num > (regions - 1))) { (p_sau_conf->region_num > (regions - 1))) {
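
The aircr_payload construction above exists because AIRCR writes are ignored unless the 0x05FA vector key is presented in the same write. A sketch of the completing store, assuming the CMSIS core header for SCB and the _Pos/_Msk macros:

    static void aircr_commit_sketch(uint32_t aircr_payload)
    {
        SCB->AIRCR = aircr_payload |
                ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) & SCB_AIRCR_VECTKEY_Msk);
    }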


@ -27,10 +27,10 @@ static void esf_dump(const z_arch_esf_t *esf)
for (int i = 0; i < 16; i += 4) { for (int i = 0; i < 16; i += 4) {
LOG_ERR("s[%2d]: 0x%08x s[%2d]: 0x%08x" LOG_ERR("s[%2d]: 0x%08x s[%2d]: 0x%08x"
" s[%2d]: 0x%08x s[%2d]: 0x%08x", " s[%2d]: 0x%08x s[%2d]: 0x%08x",
i, (u32_t)esf->s[i], i, (uint32_t)esf->s[i],
i + 1, (u32_t)esf->s[i + 1], i + 1, (uint32_t)esf->s[i + 1],
i + 2, (u32_t)esf->s[i + 2], i + 2, (uint32_t)esf->s[i + 2],
i + 3, (u32_t)esf->s[i + 3]); i + 3, (uint32_t)esf->s[i + 3]);
} }
LOG_ERR("fpscr: 0x%08x", esf->fpscr); LOG_ERR("fpscr: 0x%08x", esf->fpscr);
#endif #endif
@ -88,7 +88,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf)
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
{ {
u32_t *ssf_contents = ssf_ptr; uint32_t *ssf_contents = ssf_ptr;
z_arch_esf_t oops_esf = { 0 }; z_arch_esf_t oops_esf = { 0 };
/* TODO: Copy the rest of the register set out of ssf_ptr */ /* TODO: Copy the rest of the register set out of ssf_ptr */


@ -61,7 +61,7 @@ int arch_irq_is_enabled(unsigned int irq)
* *
* @return N/A * @return N/A
*/ */
void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags) void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{ {
/* The kernel may reserve some of the highest priority levels. /* The kernel may reserve some of the highest priority levels.
* So we offset the requested priority level with the number * So we offset the requested priority level with the number
@ -135,7 +135,7 @@ int arch_irq_is_enabled(unsigned int irq)
* *
* @return N/A * @return N/A
*/ */
void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags) void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{ {
arm_gic_irq_set_priority(irq, prio, flags); arm_gic_irq_set_priority(irq, prio, flags);
} }
@ -181,7 +181,7 @@ void _arch_isr_direct_pm(void)
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
if (_kernel.idle) { if (_kernel.idle) {
s32_t idle_val = _kernel.idle; int32_t idle_val = _kernel.idle;
_kernel.idle = 0; _kernel.idle = 0;
z_sys_power_save_idle_exit(idle_val); z_sys_power_save_idle_exit(idle_val);
@ -258,7 +258,7 @@ int irq_target_state_is_secure(unsigned int irq)
#ifdef CONFIG_DYNAMIC_INTERRUPTS #ifdef CONFIG_DYNAMIC_INTERRUPTS
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter, void (*routine)(void *parameter), void *parameter,
u32_t flags) uint32_t flags)
{ {
z_isr_install(irq, routine, parameter); z_isr_install(irq, routine, parameter);
z_arm_irq_priority_set(irq, priority, flags); z_arm_irq_priority_set(irq, priority, flags);
@ -268,7 +268,7 @@ int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
#ifdef CONFIG_DYNAMIC_DIRECT_INTERRUPTS #ifdef CONFIG_DYNAMIC_DIRECT_INTERRUPTS
static inline void z_arm_irq_dynamic_direct_isr_dispatch(void) static inline void z_arm_irq_dynamic_direct_isr_dispatch(void)
{ {
u32_t irq = __get_IPSR() - 16; uint32_t irq = __get_IPSR() - 16;
if (irq < IRQ_TABLE_SIZE) { if (irq < IRQ_TABLE_SIZE) {
struct _isr_table_entry *isr_entry = &_sw_isr_table[irq]; struct _isr_table_entry *isr_entry = &_sw_isr_table[irq];
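
A usage sketch for the dynamic-interrupt path above; the IRQ line, priority, and handler are illustrative (irq_enable() is the regular Zephyr API for unmasking the line afterwards):

    static void my_isr(void *arg)
    {
        ARG_UNUSED(arg);
        /* service the device */
    }

    void install_my_isr(void)
    {
        /* IRQ line 25, priority 1, no flags */
        arch_irq_connect_dynamic(25, 1, my_isr, NULL, 0);
        irq_enable(25);
    }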


@ -36,7 +36,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *pStackMem = Z_THREAD_STACK_BUFFER(stack); char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
char *stackEnd; char *stackEnd;
/* Offset between the top of stack and the high end of stack area. */ /* Offset between the top of stack and the high end of stack area. */
u32_t top_of_stack_offset = 0U; uint32_t top_of_stack_offset = 0U;
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) \ #if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) \
&& defined(CONFIG_USERSPACE) && defined(CONFIG_USERSPACE)
@ -58,14 +58,14 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* Reserve space on top of stack for local data. */ /* Reserve space on top of stack for local data. */
u32_t p_local_data = Z_STACK_PTR_ALIGN(pStackMem + stackSize uint32_t p_local_data = Z_STACK_PTR_ALIGN(pStackMem + stackSize
- sizeof(*thread->userspace_local_data)); - sizeof(*thread->userspace_local_data));
thread->userspace_local_data = thread->userspace_local_data =
(struct _thread_userspace_local_data *)(p_local_data); (struct _thread_userspace_local_data *)(p_local_data);
/* Top of actual stack must be moved below the user local data. */ /* Top of actual stack must be moved below the user local data. */
top_of_stack_offset = (u32_t) top_of_stack_offset = (uint32_t)
(pStackMem + stackSize - ((char *)p_local_data)); (pStackMem + stackSize - ((char *)p_local_data));
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */ #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
@ -105,12 +105,12 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
if ((options & K_USER) != 0) { if ((options & K_USER) != 0) {
pInitCtx->basic.pc = (u32_t)arch_user_mode_enter; pInitCtx->basic.pc = (uint32_t)arch_user_mode_enter;
} else { } else {
pInitCtx->basic.pc = (u32_t)z_thread_entry; pInitCtx->basic.pc = (uint32_t)z_thread_entry;
} }
#else #else
pInitCtx->basic.pc = (u32_t)z_thread_entry; pInitCtx->basic.pc = (uint32_t)z_thread_entry;
#endif #endif
#if defined(CONFIG_CPU_CORTEX_M) #if defined(CONFIG_CPU_CORTEX_M)
@ -118,10 +118,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
pInitCtx->basic.pc &= 0xfffffffe; pInitCtx->basic.pc &= 0xfffffffe;
#endif #endif
pInitCtx->basic.a1 = (u32_t)pEntry; pInitCtx->basic.a1 = (uint32_t)pEntry;
pInitCtx->basic.a2 = (u32_t)parameter1; pInitCtx->basic.a2 = (uint32_t)parameter1;
pInitCtx->basic.a3 = (u32_t)parameter2; pInitCtx->basic.a3 = (uint32_t)parameter2;
pInitCtx->basic.a4 = (u32_t)parameter3; pInitCtx->basic.a4 = (uint32_t)parameter3;
#if defined(CONFIG_CPU_CORTEX_M) #if defined(CONFIG_CPU_CORTEX_M)
pInitCtx->basic.xpsr = pInitCtx->basic.xpsr =
@ -133,7 +133,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#endif /* CONFIG_COMPILER_ISA_THUMB2 */ #endif /* CONFIG_COMPILER_ISA_THUMB2 */
#endif /* CONFIG_CPU_CORTEX_M */ #endif /* CONFIG_CPU_CORTEX_M */
thread->callee_saved.psp = (u32_t)pInitCtx; thread->callee_saved.psp = (uint32_t)pInitCtx;
thread->arch.basepri = 0; thread->arch.basepri = 0;
@ -160,7 +160,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
/* Set up privileged stack before entering user mode */ /* Set up privileged stack before entering user mode */
_current->arch.priv_stack_start = _current->arch.priv_stack_start =
(u32_t)z_priv_stack_find(_current->stack_obj); (uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_MPU_STACK_GUARD)
/* Stack guard area reserved at the bottom of the thread's /* Stack guard area reserved at the bottom of the thread's
* privileged stack. Adjust the available (writable) stack * privileged stack. Adjust the available (writable) stack
@ -176,7 +176,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
#endif /* CONFIG_MPU_STACK_GUARD */ #endif /* CONFIG_MPU_STACK_GUARD */
z_arm_userspace_enter(user_entry, p1, p2, p3, z_arm_userspace_enter(user_entry, p1, p2, p3,
(u32_t)_current->stack_info.start, (uint32_t)_current->stack_info.start,
_current->stack_info.size); _current->stack_info.size);
CODE_UNREACHABLE; CODE_UNREACHABLE;
} }
@ -211,17 +211,17 @@ void configure_builtin_stack_guard(struct k_thread *thread)
* thread privileged stack being allocated in a higher memory area * thread privileged stack being allocated in a higher memory area
* than the default thread stack (ensured by design). * than the default thread stack (ensured by design).
*/ */
u32_t guard_start = uint32_t guard_start =
((thread->arch.priv_stack_start) && ((thread->arch.priv_stack_start) &&
(__get_PSP() >= thread->arch.priv_stack_start)) ? (__get_PSP() >= thread->arch.priv_stack_start)) ?
(u32_t)thread->arch.priv_stack_start : (uint32_t)thread->arch.priv_stack_start :
(u32_t)thread->stack_obj; (uint32_t)thread->stack_obj;
__ASSERT(thread->stack_info.start == ((u32_t)thread->stack_obj), __ASSERT(thread->stack_info.start == ((uint32_t)thread->stack_obj),
"stack_info.start does not point to the start of the" "stack_info.start does not point to the start of the"
"thread allocated area."); "thread allocated area.");
#else #else
u32_t guard_start = thread->stack_info.start; uint32_t guard_start = thread->stack_info.start;
#endif #endif
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM) #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
__set_PSPLIM(guard_start); __set_PSPLIM(guard_start);
@ -277,7 +277,7 @@ void configure_builtin_stack_guard(struct k_thread *thread)
* @return The lowest allowed stack frame pointer, if error is a * @return The lowest allowed stack frame pointer, if error is a
* thread stack corruption, otherwise return 0. * thread stack corruption, otherwise return 0.
*/ */
u32_t z_check_thread_stack_fail(const u32_t fault_addr, const u32_t psp) uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{ {
const struct k_thread *thread = _current; const struct k_thread *thread = _current;
@ -286,10 +286,10 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr, const u32_t psp)
} }
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
u32_t guard_len = (thread->base.user_options & K_FP_REGS) ? uint32_t guard_len = (thread->base.user_options & K_FP_REGS) ?
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else #else
u32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE; uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
@ -305,9 +305,9 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr, const u32_t psp)
return thread->arch.priv_stack_start; return thread->arch.priv_stack_start;
} }
} else { } else {
if (psp < (u32_t)thread->stack_obj) { if (psp < (uint32_t)thread->stack_obj) {
/* Thread's user stack corruption */ /* Thread's user stack corruption */
return (u32_t)thread->stack_obj; return (uint32_t)thread->stack_obj;
} }
} }
} else { } else {
@ -421,7 +421,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread,
#if defined(CONFIG_BUILTIN_STACK_GUARD) #if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Set PSPLIM register for built-in stack guarding of main thread. */ /* Set PSPLIM register for built-in stack guarding of main thread. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM) #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
__set_PSPLIM((u32_t)main_stack); __set_PSPLIM((uint32_t)main_stack);
#else #else
#error "Built-in PSP limit checks not supported by HW" #error "Built-in PSP limit checks not supported by HW"
#endif #endif
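
The `pInitCtx->basic.pc &= 0xfffffffe` mask above strips bit 0 from the entry-point address: Thumb function addresses carry the Thumb bit in bit 0, but the stacked PC must be halfword aligned, and the Thumb state travels in the T bit of the stacked xPSR instead. A sketch with an illustrative address:

    static uint32_t thumb_clear_sketch(void)
    {
        uint32_t pc = 0x08001235u;  /* Thumb function address: bit 0 set */

        pc &= 0xfffffffe;           /* 0x08001234 goes into the stacked frame */
        return pc;
    }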


@ -90,19 +90,19 @@
((level) == 2) ? L2_SPACE : L3_SPACE) ((level) == 2) ? L2_SPACE : L3_SPACE)
#endif #endif
static u64_t base_xlat_table[NUM_BASE_LEVEL_ENTRIES] static uint64_t base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(u64_t)); __aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
static u64_t xlat_tables[CONFIG_MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES] static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_ENTRIES * sizeof(u64_t)); __aligned(XLAT_TABLE_ENTRIES * sizeof(uint64_t));
/* Translation table control register settings */ /* Translation table control register settings */
static u64_t get_tcr(int el) static uint64_t get_tcr(int el)
{ {
u64_t tcr; uint64_t tcr;
u64_t pa_bits = CONFIG_ARM64_PA_BITS; uint64_t pa_bits = CONFIG_ARM64_PA_BITS;
u64_t va_bits = CONFIG_ARM64_VA_BITS; uint64_t va_bits = CONFIG_ARM64_VA_BITS;
u64_t tcr_ps_bits; uint64_t tcr_ps_bits;
switch (pa_bits) { switch (pa_bits) {
case 48: case 48:
@ -145,20 +145,20 @@ static u64_t get_tcr(int el)
return tcr; return tcr;
} }
static int pte_desc_type(u64_t *pte) static int pte_desc_type(uint64_t *pte)
{ {
return *pte & PTE_DESC_TYPE_MASK; return *pte & PTE_DESC_TYPE_MASK;
} }
static u64_t *calculate_pte_index(u64_t addr, int level) static uint64_t *calculate_pte_index(uint64_t addr, int level)
{ {
int base_level = XLAT_TABLE_BASE_LEVEL; int base_level = XLAT_TABLE_BASE_LEVEL;
u64_t *pte; uint64_t *pte;
u64_t idx; uint64_t idx;
unsigned int i; unsigned int i;
/* Walk through all translation tables to find pte index */ /* Walk through all translation tables to find pte index */
pte = (u64_t *)base_xlat_table; pte = (uint64_t *)base_xlat_table;
for (i = base_level; i <= XLAT_TABLE_LEVEL_MAX; i++) { for (i = base_level; i <= XLAT_TABLE_LEVEL_MAX; i++) {
idx = XLAT_TABLE_VA_IDX(addr, i); idx = XLAT_TABLE_VA_IDX(addr, i);
pte += idx; pte += idx;
@ -170,26 +170,26 @@ static u64_t *calculate_pte_index(u64_t addr, int level)
if (pte_desc_type(pte) != PTE_TABLE_DESC) if (pte_desc_type(pte) != PTE_TABLE_DESC)
return NULL; return NULL;
/* Move to the next translation table level */ /* Move to the next translation table level */
pte = (u64_t *)(*pte & 0x0000fffffffff000ULL); pte = (uint64_t *)(*pte & 0x0000fffffffff000ULL);
} }
return NULL; return NULL;
} }
static void set_pte_table_desc(u64_t *pte, u64_t *table, unsigned int level) static void set_pte_table_desc(uint64_t *pte, uint64_t *table, unsigned int level)
{ {
#if DUMP_PTE #if DUMP_PTE
MMU_DEBUG("%s", XLAT_TABLE_LEVEL_SPACE(level)); MMU_DEBUG("%s", XLAT_TABLE_LEVEL_SPACE(level));
MMU_DEBUG("%p: [Table] %p\n", pte, table); MMU_DEBUG("%p: [Table] %p\n", pte, table);
#endif #endif
/* Point pte to new table */ /* Point pte to new table */
*pte = PTE_TABLE_DESC | (u64_t)table; *pte = PTE_TABLE_DESC | (uint64_t)table;
} }
static void set_pte_block_desc(u64_t *pte, u64_t addr_pa, static void set_pte_block_desc(uint64_t *pte, uint64_t addr_pa,
unsigned int attrs, unsigned int level) unsigned int attrs, unsigned int level)
{ {
u64_t desc = addr_pa; uint64_t desc = addr_pa;
unsigned int mem_type; unsigned int mem_type;
desc |= (level == 3) ? PTE_PAGE_DESC : PTE_BLOCK_DESC; desc |= (level == 3) ? PTE_PAGE_DESC : PTE_BLOCK_DESC;
@ -247,21 +247,21 @@ static void set_pte_block_desc(u64_t *pte, u64_t addr_pa,
} }
/* Returns a new reallocated table */ /* Returns a new reallocated table */
static u64_t *new_prealloc_table(void) static uint64_t *new_prealloc_table(void)
{ {
static unsigned int table_idx; static unsigned int table_idx;
__ASSERT(table_idx < CONFIG_MAX_XLAT_TABLES, __ASSERT(table_idx < CONFIG_MAX_XLAT_TABLES,
"Enough xlat tables not allocated"); "Enough xlat tables not allocated");
return (u64_t *)(xlat_tables[table_idx++]); return (uint64_t *)(xlat_tables[table_idx++]);
} }
/* Splits a block into table with entries spanning the old block */ /* Splits a block into table with entries spanning the old block */
static void split_pte_block_desc(u64_t *pte, int level) static void split_pte_block_desc(uint64_t *pte, int level)
{ {
u64_t old_block_desc = *pte; uint64_t old_block_desc = *pte;
u64_t *new_table; uint64_t *new_table;
unsigned int i = 0; unsigned int i = 0;
/* get address size shift bits for next level */ /* get address size shift bits for next level */
int levelshift = LEVEL_TO_VA_SIZE_SHIFT(level + 1); int levelshift = LEVEL_TO_VA_SIZE_SHIFT(level + 1);
@ -284,13 +284,13 @@ static void split_pte_block_desc(u64_t *pte, int level)
/* Create/Populate translation table(s) for given region */ /* Create/Populate translation table(s) for given region */
static void init_xlat_tables(const struct arm_mmu_region *region) static void init_xlat_tables(const struct arm_mmu_region *region)
{ {
u64_t *pte; uint64_t *pte;
u64_t virt = region->base_va; uint64_t virt = region->base_va;
u64_t phys = region->base_pa; uint64_t phys = region->base_pa;
u64_t size = region->size; uint64_t size = region->size;
u64_t attrs = region->attrs; uint64_t attrs = region->attrs;
u64_t level_size; uint64_t level_size;
u64_t *new_table; uint64_t *new_table;
unsigned int level = XLAT_TABLE_BASE_LEVEL; unsigned int level = XLAT_TABLE_BASE_LEVEL;
MMU_DEBUG("mmap: virt %llx phys %llx size %llx\n", virt, phys, size); MMU_DEBUG("mmap: virt %llx phys %llx size %llx\n", virt, phys, size);
@ -361,7 +361,7 @@ static void setup_page_tables(void)
{ {
unsigned int index; unsigned int index;
const struct arm_mmu_region *region; const struct arm_mmu_region *region;
u64_t max_va = 0, max_pa = 0; uint64_t max_va = 0, max_pa = 0;
for (index = 0; index < mmu_config.num_regions; index++) { for (index = 0; index < mmu_config.num_regions; index++) {
region = &mmu_config.mmu_regions[index]; region = &mmu_config.mmu_regions[index];
@ -392,7 +392,7 @@ static void setup_page_tables(void)
static void enable_mmu_el1(unsigned int flags) static void enable_mmu_el1(unsigned int flags)
{ {
ARG_UNUSED(flags); ARG_UNUSED(flags);
u64_t val; uint64_t val;
/* Set MAIR, TCR and TBBR registers */ /* Set MAIR, TCR and TBBR registers */
__asm__ volatile("msr mair_el1, %0" __asm__ volatile("msr mair_el1, %0"
@ -405,7 +405,7 @@ static void enable_mmu_el1(unsigned int flags)
: "memory", "cc"); : "memory", "cc");
__asm__ volatile("msr ttbr0_el1, %0" __asm__ volatile("msr ttbr0_el1, %0"
: :
: "r" ((u64_t)base_xlat_table) : "r" ((uint64_t)base_xlat_table)
: "memory", "cc"); : "memory", "cc");
/* Ensure these changes are seen before MMU is enabled */ /* Ensure these changes are seen before MMU is enabled */
@ -434,7 +434,7 @@ static void enable_mmu_el1(unsigned int flags)
*/ */
static int arm_mmu_init(struct device *arg) static int arm_mmu_init(struct device *arg)
{ {
u64_t val; uint64_t val;
unsigned int idx, flags = 0; unsigned int idx, flags = 0;
/* Current MMU code supports only EL1 */ /* Current MMU code supports only EL1 */
@ -449,9 +449,9 @@ static int arm_mmu_init(struct device *arg)
MMU_DEBUG("xlat tables:\n"); MMU_DEBUG("xlat tables:\n");
MMU_DEBUG("base table(L%d): %p, %d entries\n", XLAT_TABLE_BASE_LEVEL, MMU_DEBUG("base table(L%d): %p, %d entries\n", XLAT_TABLE_BASE_LEVEL,
(u64_t *)base_xlat_table, NUM_BASE_LEVEL_ENTRIES); (uint64_t *)base_xlat_table, NUM_BASE_LEVEL_ENTRIES);
for (idx = 0; idx < CONFIG_MAX_XLAT_TABLES; idx++) for (idx = 0; idx < CONFIG_MAX_XLAT_TABLES; idx++)
MMU_DEBUG("%d: %p\n", idx, (u64_t *)(xlat_tables + idx)); MMU_DEBUG("%d: %p\n", idx, (uint64_t *)(xlat_tables + idx));
setup_page_tables(); setup_page_tables();
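
The table walk above indexes one translation level at a time. For the common 4 KiB granule each level consumes 9 VA bits; a standalone sketch of the per-level index computation (the kernel's XLAT_TABLE_VA_IDX macro is configuration-driven, this is only its shape under that granule assumption):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int va_index(uint64_t va, int level)
    {
        int shift = 12 + 9 * (3 - level);  /* level 3 -> 12, level 2 -> 21, ... */

        return (unsigned int)((va >> shift) & 0x1ff);
    }

    int main(void)
    {
        uint64_t va = 0x40201000ULL;  /* illustrative virtual address */

        for (int level = 1; level <= 3; level++) {
            printf("level %d index: %u\n", level, va_index(va, level));
        }
        return 0;
    }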


@ -17,9 +17,9 @@
LOG_MODULE_DECLARE(os); LOG_MODULE_DECLARE(os);
static void print_EC_cause(u64_t esr) static void print_EC_cause(uint64_t esr)
{ {
u32_t EC = (u32_t)esr >> 26; uint32_t EC = (uint32_t)esr >> 26;
switch (EC) { switch (EC) {
case 0b000000: case 0b000000:
@ -157,7 +157,7 @@ static void esf_dump(const z_arch_esf_t *esf)
void z_arm64_fatal_error(unsigned int reason, const z_arch_esf_t *esf) void z_arm64_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{ {
u64_t el, esr, elr, far; uint64_t el, esr, elr, far;
if (reason != K_ERR_SPURIOUS_IRQ) { if (reason != K_ERR_SPURIOUS_IRQ) {
__asm__ volatile("mrs %0, CurrentEL" : "=r" (el)); __asm__ volatile("mrs %0, CurrentEL" : "=r" (el));
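
print_EC_cause() above decodes the exception class from ESR_ELx, which sits in bits [31:26]; the 32-bit truncation before the shift is safe because the EC field lies entirely within the low word. The same extraction, spelled out:

    static inline uint32_t esr_ec(uint64_t esr)
    {
        /* The mask is redundant after the truncate-and-shift but makes
         * the 6-bit field width explicit.
         */
        return ((uint32_t)esr >> 26) & 0x3f;
    }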


@ -47,7 +47,7 @@ int arch_irq_is_enabled(unsigned int irq)
return arm_gic_irq_is_enabled(irq); return arm_gic_irq_is_enabled(irq);
} }
void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags) void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{ {
arm_gic_irq_set_priority(irq, prio, flags); arm_gic_irq_set_priority(irq, prio, flags);
} }
@ -56,7 +56,7 @@ void z_arm64_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
#ifdef CONFIG_DYNAMIC_INTERRUPTS #ifdef CONFIG_DYNAMIC_INTERRUPTS
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority, int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter), void *parameter, void (*routine)(void *parameter), void *parameter,
u32_t flags) uint32_t flags)
{ {
z_isr_install(irq, routine, parameter); z_isr_install(irq, routine, parameter);
z_arm64_irq_priority_set(irq, priority, flags); z_arm64_irq_priority_set(irq, priority, flags);


@ -45,17 +45,17 @@ struct init_stack_frame {
/* top of the stack / most recently pushed */ /* top of the stack / most recently pushed */
/* SPSL_ELn and ELR_ELn */ /* SPSL_ELn and ELR_ELn */
u64_t spsr; uint64_t spsr;
u64_t elr; uint64_t elr;
/* /*
* Used by z_thread_entry_wrapper, which pulls these off the stack and * Used by z_thread_entry_wrapper, which pulls these off the stack and
* into argument registers before calling z_thread_entry() * into argument registers before calling z_thread_entry()
*/ */
u64_t entry_point; uint64_t entry_point;
u64_t arg1; uint64_t arg1;
u64_t arg2; uint64_t arg2;
u64_t arg3; uint64_t arg3;
/* least recently pushed */ /* least recently pushed */
}; };
@ -76,10 +76,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
pInitCtx = (struct init_stack_frame *)(Z_STACK_PTR_ALIGN(stackEnd - pInitCtx = (struct init_stack_frame *)(Z_STACK_PTR_ALIGN(stackEnd -
sizeof(struct init_stack_frame))); sizeof(struct init_stack_frame)));
pInitCtx->entry_point = (u64_t)pEntry; pInitCtx->entry_point = (uint64_t)pEntry;
pInitCtx->arg1 = (u64_t)parameter1; pInitCtx->arg1 = (uint64_t)parameter1;
pInitCtx->arg2 = (u64_t)parameter2; pInitCtx->arg2 = (uint64_t)parameter2;
pInitCtx->arg3 = (u64_t)parameter3; pInitCtx->arg3 = (uint64_t)parameter3;
/* /*
* - ELR_ELn: to be used by eret in z_thread_entry_wrapper() to return * - ELR_ELn: to be used by eret in z_thread_entry_wrapper() to return
@ -88,7 +88,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* - SPSR_ELn: to enable IRQs (we are masking debug exceptions, SError * - SPSR_ELn: to enable IRQs (we are masking debug exceptions, SError
* interrupts and FIQs). * interrupts and FIQs).
*/ */
pInitCtx->elr = (u64_t)z_thread_entry; pInitCtx->elr = (uint64_t)z_thread_entry;
pInitCtx->spsr = SPSR_MODE_EL1H | DAIF_FIQ; pInitCtx->spsr = SPSR_MODE_EL1H | DAIF_FIQ;
/* /*
@ -100,6 +100,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* task is first scheduled. * task is first scheduled.
*/ */
thread->callee_saved.sp = (u64_t)pInitCtx; thread->callee_saved.sp = (uint64_t)pInitCtx;
thread->callee_saved.x30 = (u64_t)z_thread_entry_wrapper; thread->callee_saved.x30 = (uint64_t)z_thread_entry_wrapper;
} }


@ -50,7 +50,7 @@ extern "C" {
* *
* @return a valid MPU region number or -EINVAL * @return a valid MPU region number or -EINVAL
*/ */
int arm_cmse_mpu_region_get(u32_t addr); int arm_cmse_mpu_region_get(uint32_t addr);
/** /**
* @brief Read accessibility of an address * @brief Read accessibility of an address
@ -69,7 +69,7 @@ int arm_cmse_mpu_region_get(u32_t addr);
* *
* @return 1 if address is readable, 0 otherwise. * @return 1 if address is readable, 0 otherwise.
*/ */
int arm_cmse_addr_read_ok(u32_t addr, int force_npriv); int arm_cmse_addr_read_ok(uint32_t addr, int force_npriv);
/** /**
* @brief Read and Write accessibility of an address * @brief Read and Write accessibility of an address
@ -89,7 +89,7 @@ int arm_cmse_addr_read_ok(u32_t addr, int force_npriv);
* *
* @return 1 if address is Read and Writable, 0 otherwise. * @return 1 if address is Read and Writable, 0 otherwise.
*/ */
int arm_cmse_addr_readwrite_ok(u32_t addr, int force_npriv); int arm_cmse_addr_readwrite_ok(uint32_t addr, int force_npriv);
/** /**
* @brief Read accessibility of an address range * @brief Read accessibility of an address range
@ -111,7 +111,7 @@ int arm_cmse_addr_readwrite_ok(u32_t addr, int force_npriv);
* *
* @return 1 if address range is readable, 0 otherwise. * @return 1 if address range is readable, 0 otherwise.
*/ */
int arm_cmse_addr_range_read_ok(u32_t addr, u32_t size, int force_npriv); int arm_cmse_addr_range_read_ok(uint32_t addr, uint32_t size, int force_npriv);
/** /**
* @brief Read and Write accessibility of an address range * @brief Read and Write accessibility of an address range
@ -133,7 +133,7 @@ int arm_cmse_addr_range_read_ok(u32_t addr, u32_t size, int force_npriv);
* *
* @return 1 if address range is Read and Writable, 0 otherwise. * @return 1 if address range is Read and Writable, 0 otherwise.
*/ */
int arm_cmse_addr_range_readwrite_ok(u32_t addr, u32_t size, int force_npriv); int arm_cmse_addr_range_readwrite_ok(uint32_t addr, uint32_t size, int force_npriv);
/* Required for C99 compilation (required for GCC-8.x version, /* Required for C99 compilation (required for GCC-8.x version,
* where typeof is used instead of __typeof__) * where typeof is used instead of __typeof__)
@ -232,7 +232,7 @@ int arm_cmse_addr_range_readwrite_ok(u32_t addr, u32_t size, int force_npriv);
* *
* @return a valid MPU region number or -EINVAL * @return a valid MPU region number or -EINVAL
*/ */
int arm_cmse_mpu_nonsecure_region_get(u32_t addr); int arm_cmse_mpu_nonsecure_region_get(uint32_t addr);
/** /**
* @brief Get the SAU region number of an address * @brief Get the SAU region number of an address
@ -250,7 +250,7 @@ int arm_cmse_mpu_nonsecure_region_get(u32_t addr);
* *
* @return a valid SAU region number or -EINVAL * @return a valid SAU region number or -EINVAL
*/ */
int arm_cmse_sau_region_get(u32_t addr); int arm_cmse_sau_region_get(uint32_t addr);
/** /**
* @brief Get the IDAU region number of an address * @brief Get the IDAU region number of an address
@ -268,7 +268,7 @@ int arm_cmse_sau_region_get(u32_t addr);
* *
* @return a valid IDAU region number or -EINVAL * @return a valid IDAU region number or -EINVAL
*/ */
int arm_cmse_idau_region_get(u32_t addr); int arm_cmse_idau_region_get(uint32_t addr);
/** /**
* @brief Security attribution of an address * @brief Security attribution of an address
@ -280,7 +280,7 @@ int arm_cmse_idau_region_get(u32_t addr);
* *
* @return 1 if address is Secure, 0 otherwise. * @return 1 if address is Secure, 0 otherwise.
*/ */
int arm_cmse_addr_is_secure(u32_t addr); int arm_cmse_addr_is_secure(uint32_t addr);
/** /**
* @brief Non-Secure Read accessibility of an address * @brief Non-Secure Read accessibility of an address
@ -300,7 +300,7 @@ int arm_cmse_addr_is_secure(u32_t addr);
* *
* @return 1 if address is readable from Non-Secure state, 0 otherwise. * @return 1 if address is readable from Non-Secure state, 0 otherwise.
*/ */
int arm_cmse_addr_nonsecure_read_ok(u32_t addr, int force_npriv); int arm_cmse_addr_nonsecure_read_ok(uint32_t addr, int force_npriv);
/** /**
* @brief Non-Secure Read and Write accessibility of an address * @brief Non-Secure Read and Write accessibility of an address
@ -320,7 +320,7 @@ int arm_cmse_addr_nonsecure_read_ok(u32_t addr, int force_npriv);
* *
* @return 1 if address is Read and Writable from Non-Secure state, 0 otherwise * @return 1 if address is Read and Writable from Non-Secure state, 0 otherwise
*/ */
int arm_cmse_addr_nonsecure_readwrite_ok(u32_t addr, int force_npriv); int arm_cmse_addr_nonsecure_readwrite_ok(uint32_t addr, int force_npriv);
/** /**
* @brief Non-Secure Read accessibility of an address range * @brief Non-Secure Read accessibility of an address range
@ -342,7 +342,7 @@ int arm_cmse_addr_nonsecure_readwrite_ok(u32_t addr, int force_npriv);
* *
* @return 1 if address range is readable, 0 otherwise. * @return 1 if address range is readable, 0 otherwise.
*/ */
int arm_cmse_addr_range_nonsecure_read_ok(u32_t addr, u32_t size, int arm_cmse_addr_range_nonsecure_read_ok(uint32_t addr, uint32_t size,
int force_npriv); int force_npriv);
/** /**
@ -365,7 +365,7 @@ int arm_cmse_addr_range_nonsecure_read_ok(u32_t addr, u32_t size,
* *
* @return 1 if address range is Read and Writable, 0 otherwise. * @return 1 if address range is Read and Writable, 0 otherwise.
*/ */
int arm_cmse_addr_range_nonsecure_readwrite_ok(u32_t addr, u32_t size, int arm_cmse_addr_range_nonsecure_readwrite_ok(uint32_t addr, uint32_t size,
int force_npriv); int force_npriv);
/** /**
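
A usage sketch for the region-query API above, mirroring the single-region check mpu_buffer_validate() performs: a buffer can be validated against one region's permissions only if its first and last byte map to the same MPU region (the helper name is illustrative):

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool in_single_mpu_region(uint32_t addr, size_t len)
    {
        int first = arm_cmse_mpu_region_get(addr);
        int last = arm_cmse_mpu_region_get(addr + len - 1);

        return (first != -EINVAL) && (first == last);
    }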


@ -40,13 +40,13 @@ extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
*/ */
static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void) static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void)
{ {
u32_t msp = (u32_t)(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0])) + uint32_t msp = (uint32_t)(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0])) +
K_THREAD_STACK_SIZEOF(z_interrupt_stacks[0]); K_THREAD_STACK_SIZEOF(z_interrupt_stacks[0]);
__set_MSP(msp); __set_MSP(msp);
#if defined(CONFIG_BUILTIN_STACK_GUARD) #if defined(CONFIG_BUILTIN_STACK_GUARD)
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM) #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
__set_MSPLIM((u32_t)z_interrupt_stacks[0]); __set_MSPLIM((uint32_t)z_interrupt_stacks[0]);
#else #else
#error "Built-in MSP limit checks not supported by HW" #error "Built-in MSP limit checks not supported by HW"
#endif #endif


@ -35,13 +35,13 @@ extern "C" {
* state configuration. * state configuration.
*/ */
typedef struct tz_nonsecure_setup_conf { typedef struct tz_nonsecure_setup_conf {
u32_t msp_ns; uint32_t msp_ns;
u32_t psp_ns; uint32_t psp_ns;
u32_t vtor_ns; uint32_t vtor_ns;
struct { struct {
u32_t npriv:1; uint32_t npriv:1;
u32_t spsel:1; uint32_t spsel:1;
u32_t reserved:30; uint32_t reserved:30;
} control_ns; } control_ns;
} tz_nonsecure_setup_conf_t; } tz_nonsecure_setup_conf_t;
@ -84,7 +84,7 @@ void tz_nonsecure_state_setup(const tz_nonsecure_setup_conf_t *p_ns_conf);
* *
* @return N/A * @return N/A
*/ */
void tz_nonsecure_msplim_set(u32_t val); void tz_nonsecure_msplim_set(uint32_t val);
/** /**
* *
@ -101,7 +101,7 @@ void tz_nonsecure_msplim_set(u32_t val);
* *
* @return N/A * @return N/A
*/ */
void tz_nonsecure_psplim_set(u32_t val); void tz_nonsecure_psplim_set(uint32_t val);
#endif /* CONFIG_ARMV8_M_MAINLINE */ #endif /* CONFIG_ARMV8_M_MAINLINE */
@ -229,7 +229,7 @@ void tz_sau_configure(int enable, int allns);
* *
* @return The number of configured SAU regions. * @return The number of configured SAU regions.
*/ */
u32_t tz_sau_number_of_regions_get(void); uint32_t tz_sau_number_of_regions_get(void);
#if defined(CONFIG_CPU_HAS_ARM_SAU) #if defined(CONFIG_CPU_HAS_ARM_SAU)
/** /**
@ -240,11 +240,11 @@ u32_t tz_sau_number_of_regions_get(void);
* for a SAU region configuration. * for a SAU region configuration.
*/ */
typedef struct { typedef struct {
u8_t region_num; uint8_t region_num;
u8_t enable:1; uint8_t enable:1;
u8_t nsc:1; uint8_t nsc:1;
u32_t base_addr; uint32_t base_addr;
u32_t limit_addr; uint32_t limit_addr;
} tz_sau_conf_t; } tz_sau_conf_t;
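
A usage sketch for the tz_nonsecure_setup_conf_t structure above, handing off to a Non-Secure image; the image base is illustrative, and the initial MSP is conventionally the first word of the NS vector table:

    static void ns_boot_sketch(void)
    {
        const uint32_t ns_image_base = 0x00040000u;  /* illustrative */

        tz_nonsecure_setup_conf_t conf = {
            .msp_ns = *(const uint32_t *)ns_image_base, /* vector 0: initial MSP */
            .psp_ns = 0,
            .vtor_ns = ns_image_base,
            .control_ns = {
                .npriv = 0, /* privileged thread mode */
                .spsel = 0, /* run on MSP */
            },
        };

        tz_nonsecure_state_setup(&conf);
    }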


@ -52,8 +52,8 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry, extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3, void *p1, void *p2, void *p3,
u32_t stack_end, uint32_t stack_end,
u32_t stack_start); uint32_t stack_start);
extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf); extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);


@ -46,7 +46,7 @@ def read_intlist(intlist_path, syms):
include/linker/intlist.ld: include/linker/intlist.ld:
struct { struct {
u32_t num_vectors; <- typically CONFIG_NUM_IRQS uint32_t num_vectors; <- typically CONFIG_NUM_IRQS
struct _isr_list isrs[]; <- Usually of smaller size than num_vectors struct _isr_list isrs[]; <- Usually of smaller size than num_vectors
} }
@ -55,9 +55,9 @@ def read_intlist(intlist_path, syms):
struct _isr_list { struct _isr_list {
/** IRQ line number */ /** IRQ line number */
s32_t irq; int32_t irq;
/** Flags for this IRQ, see ISR_FLAG_* definitions */ /** Flags for this IRQ, see ISR_FLAG_* definitions */
s32_t flags; int32_t flags;
/** ISR to call */ /** ISR to call */
void *func; void *func;
/** Parameter for non-direct IRQs */ /** Parameter for non-direct IRQs */
@ -134,7 +134,7 @@ source_header = """
#include <arch/cpu.h> #include <arch/cpu.h>
#if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE) #if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE)
#define ISR_WRAPPER ((u32_t)&_isr_wrapper) #define ISR_WRAPPER ((uint32_t)&_isr_wrapper)
#else #else
#define ISR_WRAPPER NULL #define ISR_WRAPPER NULL
#endif #endif
@ -147,7 +147,7 @@ def write_source_file(fp, vt, swt, intlist, syms):
nv = intlist["num_vectors"] nv = intlist["num_vectors"]
if vt: if vt:
fp.write("u32_t __irq_vector_table _irq_vector_table[%d] = {\n" % nv) fp.write("uint32_t __irq_vector_table _irq_vector_table[%d] = {\n" % nv)
for i in range(nv): for i in range(nv):
fp.write("\t{},\n".format(vt[i])) fp.write("\t{},\n".format(vt[i]))
fp.write("};\n") fp.write("};\n")


@ -13,8 +13,8 @@
* which indicates the number of interrupts specified * which indicates the number of interrupts specified
*/ */
struct int_list_header { struct int_list_header {
u32_t table_size; uint32_t table_size;
u32_t offset; uint32_t offset;
}; };
/* These values are not included in the resulting binary, but instead form the /* These values are not included in the resulting binary, but instead form the
@ -50,8 +50,8 @@ Z_GENERIC_SECTION(.irq_info) struct int_list_header _iheader = {
#define IRQ_VECTOR_TABLE_DEFAULT_ISR z_irq_spurious #define IRQ_VECTOR_TABLE_DEFAULT_ISR z_irq_spurious
#endif /* CONFIG_GEN_SW_ISR_TABLE */ #endif /* CONFIG_GEN_SW_ISR_TABLE */
u32_t __irq_vector_table _irq_vector_table[IRQ_TABLE_SIZE] = { uint32_t __irq_vector_table _irq_vector_table[IRQ_TABLE_SIZE] = {
[0 ...(IRQ_TABLE_SIZE - 1)] = (u32_t)&IRQ_VECTOR_TABLE_DEFAULT_ISR, [0 ...(IRQ_TABLE_SIZE - 1)] = (uint32_t)&IRQ_VECTOR_TABLE_DEFAULT_ISR,
}; };
#endif /* CONFIG_GEN_IRQ_VECTOR_TABLE */ #endif /* CONFIG_GEN_IRQ_VECTOR_TABLE */


@ -129,7 +129,7 @@ int __weak arch_irq_connect_dynamic(unsigned int irq,
unsigned int priority, unsigned int priority,
void (*routine)(void *), void (*routine)(void *),
void *parameter, void *parameter,
u32_t flags) uint32_t flags)
{ {
ARG_UNUSED(flags); ARG_UNUSED(flags);
ARG_UNUSED(priority); ARG_UNUSED(priority);


@ -6,18 +6,18 @@
#include <kernel.h> #include <kernel.h>
#include <kernel_internal.h> #include <kernel_internal.h>
u64_t arch_timing_swap_start; uint64_t arch_timing_swap_start;
u64_t arch_timing_swap_end; uint64_t arch_timing_swap_end;
u64_t arch_timing_irq_start; uint64_t arch_timing_irq_start;
u64_t arch_timing_irq_end; uint64_t arch_timing_irq_end;
u64_t arch_timing_tick_start; uint64_t arch_timing_tick_start;
u64_t arch_timing_tick_end; uint64_t arch_timing_tick_end;
u64_t arch_timing_enter_user_mode_end; uint64_t arch_timing_enter_user_mode_end;
/* location of the time stamps */ /* location of the time stamps */
u32_t arch_timing_value_swap_end; uint32_t arch_timing_value_swap_end;
u64_t arch_timing_value_swap_common; uint64_t arch_timing_value_swap_common;
u64_t arch_timing_value_swap_temp; uint64_t arch_timing_value_swap_temp;
#if defined(CONFIG_NRF_RTC_TIMER) #if defined(CONFIG_NRF_RTC_TIMER)
#include <nrfx.h> #include <nrfx.h>
@ -48,13 +48,13 @@ u64_t arch_timing_value_swap_temp;
#define TIMING_INFO_PRE_READ() #define TIMING_INFO_PRE_READ()
 #define TIMING_INFO_OS_GET_TIME() (k_cycle_get_32())
 #define TIMING_INFO_GET_TIMER_VALUE() (SysTick->VAL)
-#define SUBTRACT_CLOCK_CYCLES(val) (SysTick->LOAD - (u32_t)val)
+#define SUBTRACT_CLOCK_CYCLES(val) (SysTick->LOAD - (uint32_t)val)
 #elif defined(CONFIG_ARC)
 #define TIMING_INFO_PRE_READ()
 #define TIMING_INFO_OS_GET_TIME() (k_cycle_get_32())
 #define TIMING_INFO_GET_TIMER_VALUE() (z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT))
-#define SUBTRACT_CLOCK_CYCLES(val) ((u32_t)val)
+#define SUBTRACT_CLOCK_CYCLES(val) ((uint32_t)val)
 #elif defined(CONFIG_NIOS2)
 #include "altera_avalon_timer_regs.h"
@@ -62,24 +62,24 @@ u64_t arch_timing_value_swap_temp;
 (IOWR_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE, 10))
 #define TIMING_INFO_OS_GET_TIME() (SUBTRACT_CLOCK_CYCLES(\
-((u32_t)IORD_ALTERA_AVALON_TIMER_SNAPH(TIMER_0_BASE) << 16)\
-| ((u32_t)IORD_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE))))
+((uint32_t)IORD_ALTERA_AVALON_TIMER_SNAPH(TIMER_0_BASE) << 16)\
+| ((uint32_t)IORD_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE))))
 #define TIMING_INFO_GET_TIMER_VALUE() (\
-((u32_t)IORD_ALTERA_AVALON_TIMER_SNAPH(TIMER_0_BASE) << 16)\
-| ((u32_t)IORD_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE)))
+((uint32_t)IORD_ALTERA_AVALON_TIMER_SNAPH(TIMER_0_BASE) << 16)\
+| ((uint32_t)IORD_ALTERA_AVALON_TIMER_SNAPL(TIMER_0_BASE)))
 #define SUBTRACT_CLOCK_CYCLES(val) \
 ((IORD_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE) \
 << 16 | \
 (IORD_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE))) \
-- ((u32_t)val))
+- ((uint32_t)val))
 #else
 #define TIMING_INFO_PRE_READ()
 #define TIMING_INFO_OS_GET_TIME() (k_cycle_get_32())
 #define TIMING_INFO_GET_TIMER_VALUE() (k_cycle_get_32())
-#define SUBTRACT_CLOCK_CYCLES(val) ((u32_t)val)
+#define SUBTRACT_CLOCK_CYCLES(val) ((uint32_t)val)
 #endif /* CONFIG_NRF_RTC_TIMER */
@@ -87,7 +87,7 @@ void read_timer_start_of_swap(void)
 {
 if (arch_timing_value_swap_end == 1U) {
 TIMING_INFO_PRE_READ();
-arch_timing_swap_start = (u32_t) TIMING_INFO_OS_GET_TIME();
+arch_timing_swap_start = (uint32_t) TIMING_INFO_OS_GET_TIME();
 }
 }
@@ -97,7 +97,7 @@ void read_timer_end_of_swap(void)
 TIMING_INFO_PRE_READ();
 arch_timing_value_swap_end = 2U;
 arch_timing_value_swap_common =
-(u64_t)TIMING_INFO_OS_GET_TIME();
+(uint64_t)TIMING_INFO_OS_GET_TIME();
 }
 }
@@ -107,29 +107,29 @@ void read_timer_end_of_swap(void)
 void read_timer_start_of_isr(void)
 {
 TIMING_INFO_PRE_READ();
-arch_timing_irq_start = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_irq_start = (uint32_t) TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_end_of_isr(void)
 {
 TIMING_INFO_PRE_READ();
-arch_timing_irq_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_irq_end = (uint32_t) TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_start_of_tick_handler(void)
 {
 TIMING_INFO_PRE_READ();
-arch_timing_tick_start = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_tick_start = (uint32_t)TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_end_of_tick_handler(void)
 {
 TIMING_INFO_PRE_READ();
-arch_timing_tick_end = (u32_t) TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_tick_end = (uint32_t) TIMING_INFO_GET_TIMER_VALUE();
 }
 void read_timer_end_of_userspace_enter(void)
 {
 TIMING_INFO_PRE_READ();
-arch_timing_enter_user_mode_end = (u32_t)TIMING_INFO_GET_TIMER_VALUE();
+arch_timing_enter_user_mode_end = (uint32_t)TIMING_INFO_GET_TIMER_VALUE();
 }
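Note on the macro family above: SysTick is a down-counter (it runs from LOAD toward zero), while the ARC timer and the generic k_cycle_get_32() fallback count up, which is why SUBTRACT_CLOCK_CYCLES() is platform-specific. A minimal sketch of the normalization, with hypothetical helper names:

/* Normalize raw readings so callers always see up-counting cycles. */
static inline uint32_t elapsed_systick(uint32_t load, uint32_t val)
{
	return load - val; /* down-counter: elapsed = reload - current */
}

static inline uint32_t elapsed_upcounter(uint32_t val)
{
	return val; /* up-counters need no conversion */
}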


@@ -24,7 +24,7 @@
 #if ALT_CPU_ICACHE_SIZE > 0
 void z_nios2_icache_flush_all(void)
 {
-u32_t i;
+uint32_t i;
 for (i = 0U; i < ALT_CPU_ICACHE_SIZE; i += ALT_CPU_ICACHE_LINE_SIZE) {
 z_nios2_icache_flush(i);
@@ -53,7 +53,7 @@ void z_nios2_icache_flush_all(void)
 #if ALT_CPU_DCACHE_SIZE > 0
 void z_nios2_dcache_flush_all(void)
 {
-u32_t i;
+uint32_t i;
 for (i = 0U; i < ALT_CPU_DCACHE_SIZE; i += ALT_CPU_DCACHE_LINE_SIZE) {
 z_nios2_dcache_flush(i);
@@ -70,10 +70,10 @@ void z_nios2_dcache_flush_all(void)
 * use the z_nios2_dcache_flush() routine instead.
 */
 #if ALT_CPU_DCACHE_SIZE > 0
-void z_nios2_dcache_flush_no_writeback(void *start, u32_t len)
+void z_nios2_dcache_flush_no_writeback(void *start, uint32_t len)
 {
-u8_t *i;
-u8_t *end = ((char *) start) + len;
+uint8_t *i;
+uint8_t *end = ((char *) start) + len;
 for (i = start; i < end; i += ALT_CPU_DCACHE_LINE_SIZE) {
 __asm__ volatile ("initda (%0)" :: "r" (i));
@@ -85,7 +85,7 @@ void z_nios2_dcache_flush_no_writeback(void *start, u32_t len)
 * multiple of 2 (which it always is).
 */
-if (((u32_t) start) & (ALT_CPU_DCACHE_LINE_SIZE - 1)) {
+if (((uint32_t) start) & (ALT_CPU_DCACHE_LINE_SIZE - 1)) {
 __asm__ volatile ("initda (%0)" :: "r" (i));
 }
 }
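The trailing initda above handles a subtle boundary case. A worked example with hypothetical numbers, assuming 0x20-byte D-cache lines:

/* start = 0x1004, len = 0x40, so end = 0x1044.
 * The loop issues initda at 0x1004 and 0x1024, which covers the
 * cache lines at 0x1000 and 0x1020, then stops because 0x1044 is
 * not < end. The bytes 0x1040..0x1043 live in a third line the
 * stride never reached; after the loop, i == 0x1044 still points
 * into that line, so the extra initda on a misaligned start
 * flushes it.
 */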


@@ -113,7 +113,7 @@ SECTION_FUNC(TEXT, __start)
 ori r3, r3, 0xaaaa
 1:
 /* Loop through the z_interrupt_stacks treating it as an array of
-* u32_t, setting each element to r3 */
+* uint32_t, setting each element to r3 */
 stw r3, (r1)
 subi r2, r2, 4
 addi r1, r1, 4
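The loop above paints the interrupt stack with a 0xAAAA... sentinel so unused words can be recognized later (stack high-water-mark analysis). A C rendering of the same fill, assuming r3 ends up holding 0xAAAAAAAA and r1/r2 are the cursor and remaining byte count:

#include <stddef.h>
#include <stdint.h>

static void paint_stack(uint32_t *base, size_t bytes)
{
	/* Same effect as the stw/subi/addi loop: one word at a time */
	for (size_t i = 0; i < bytes / 4; i++) {
		base[i] = 0xAAAAAAAA;
	}
}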


@@ -41,7 +41,7 @@ FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
 #if defined(CONFIG_EXTRA_EXCEPTION_INFO) && \
 (defined(CONFIG_PRINTK) || defined(CONFIG_LOG)) \
 && defined(ALT_CPU_HAS_EXTRA_EXCEPTION_INFO)
-static char *cause_str(u32_t cause_code)
+static char *cause_str(uint32_t cause_code)
 {
 switch (cause_code) {
 case 0:
@@ -105,7 +105,7 @@ FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
 #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
 /* Unfortunately, completely unavailable on Nios II/e cores */
 #ifdef ALT_CPU_HAS_EXTRA_EXCEPTION_INFO
-u32_t exc_reg, badaddr_reg, eccftl;
+uint32_t exc_reg, badaddr_reg, eccftl;
 enum nios2_exception_cause cause;
 exc_reg = z_nios2_creg_read(NIOS2_CR_EXCEPTION);


@@ -33,7 +33,7 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
 void arch_irq_enable(unsigned int irq)
 {
-u32_t ienable;
+uint32_t ienable;
 unsigned int key;
 key = irq_lock();
@@ -49,7 +49,7 @@ void arch_irq_enable(unsigned int irq)
 void arch_irq_disable(unsigned int irq)
 {
-u32_t ienable;
+uint32_t ienable;
 unsigned int key;
 key = irq_lock();
@@ -63,7 +63,7 @@ void arch_irq_disable(unsigned int irq)
 int arch_irq_is_enabled(unsigned int irq)
 {
-u32_t ienable;
+uint32_t ienable;
 ienable = z_nios2_creg_read(NIOS2_CR_IENABLE);
 return ienable & BIT(irq);
@@ -76,7 +76,7 @@ int arch_irq_is_enabled(unsigned int irq)
 *
 * @param ipending Bitfield of interrupts
 */
-void _enter_irq(u32_t ipending)
+void _enter_irq(uint32_t ipending)
 {
 int index;
@@ -122,7 +122,7 @@ void _enter_irq(u32_t ipending)
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter), void *parameter,
-u32_t flags)
+uint32_t flags)
 {
 ARG_UNUSED(flags);
 ARG_UNUSED(priority);
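arch_irq_enable() and arch_irq_disable() are read-modify-write sequences on the IENABLE control register, which is why they bracket the update with irq_lock()/irq_unlock(): an interrupt taken between the read and the write could otherwise lose a concurrent update. A sketch of the enable path under that assumption (z_nios2_creg_write() is assumed here as the store-side counterpart of the z_nios2_creg_read() visible above):

static void irq_enable_sketch(unsigned int irq)
{
	uint32_t ienable;
	unsigned int key = irq_lock();

	ienable = z_nios2_creg_read(NIOS2_CR_IENABLE);
	ienable |= BIT(irq); /* set this line's enable bit */
	z_nios2_creg_write(NIOS2_CR_IENABLE, ienable);

	irq_unlock(key);
}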


@@ -48,8 +48,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 iframe->arg2 = arg2;
 iframe->arg3 = arg3;
-thread->callee_saved.sp = (u32_t)iframe;
-thread->callee_saved.ra = (u32_t)z_thread_entry_wrapper;
+thread->callee_saved.sp = (uint32_t)iframe;
+thread->callee_saved.ra = (uint32_t)z_thread_entry_wrapper;
 thread->callee_saved.key = NIOS2_STATUS_PIE_MSK;
 /* Leave the rest of thread->callee_saved junk */
 }


@@ -58,7 +58,7 @@ void z_nios2_icache_flush_all(void);
 #if ALT_CPU_DCACHE_SIZE > 0
 void z_nios2_dcache_flush_all(void);
-void z_nios2_dcache_flush_no_writeback(void *start, u32_t len);
+void z_nios2_dcache_flush_no_writeback(void *start, uint32_t len);
 #else
 #define z_nios2_dcache_flush_all() do { } while (0)
 #define z_nios2_dcache_flush_no_writeback(x, y) do { } while (0)


@@ -47,7 +47,7 @@ int arch_irq_is_enabled(unsigned int irq)
 */
 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter),
-void *parameter, u32_t flags)
+void *parameter, uint32_t flags)
 {
 posix_isr_declare(irq, (int)flags, routine, parameter);
 posix_irq_priority_set(irq, priority, flags);


@@ -98,7 +98,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread,
 void posix_irq_check_idle_exit(void)
 {
 if (_kernel.idle) {
-s32_t idle_val = _kernel.idle;
+int32_t idle_val = _kernel.idle;
 _kernel.idle = 0;
 z_sys_power_save_idle_exit(idle_val);


@@ -32,7 +32,7 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter), void *parameter,
-u32_t flags)
+uint32_t flags)
 {
 ARG_UNUSED(flags);


@@ -20,8 +20,8 @@ static struct acpi_madt *madt;
 static bool validate_checksum(void *buf, int len)
 {
-u8_t *cp = buf;
-u8_t checksum = 0;
+uint8_t *cp = buf;
+uint8_t checksum = 0;
 while (len--) {
 checksum += *(cp++);
@@ -88,7 +88,7 @@ void z_acpi_init(void)
 * If it's valid, then remember it for later.
 */
-int nr_sdts = (rsdt->sdt.length - sizeof(rsdt)) / sizeof(u32_t);
+int nr_sdts = (rsdt->sdt.length - sizeof(rsdt)) / sizeof(uint32_t);
 for (int i = 0; i < nr_sdts; ++i) {
 struct acpi_sdt *sdt = UINT_TO_POINTER(rsdt->sdts[i]);
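validate_checksum() implements the standard ACPI rule: the byte-wise sum of an entire table, including its checksum field, must be zero modulo 256. A self-contained sketch of the same check:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool acpi_sum_ok(const void *buf, size_t len)
{
	const uint8_t *cp = buf;
	uint8_t sum = 0; /* uint8_t arithmetic gives mod-256 for free */

	while (len--) {
		sum += *cp++;
	}
	return sum == 0;
}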


@@ -43,7 +43,7 @@ static inline uintptr_t esf_get_code(const z_arch_esf_t *esf)
 }
 #ifdef CONFIG_THREAD_STACK_INFO
-bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs)
+bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 {
 uintptr_t start, end;
@@ -89,7 +89,7 @@ struct stack_frame {
 #define MAX_STACK_FRAMES 8
-static void unwind_stack(uintptr_t base_ptr, u16_t cs)
+static void unwind_stack(uintptr_t base_ptr, uint16_t cs)
 {
 struct stack_frame *frame;
 int i;
@@ -342,7 +342,7 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
 #ifdef CONFIG_X86_64
 if ((void *)esf->rip >= exceptions[i].start &&
 (void *)esf->rip < exceptions[i].end) {
-esf->rip = (u64_t)(exceptions[i].fixup);
+esf->rip = (uint64_t)(exceptions[i].fixup);
 return;
 }
 #else


@@ -49,7 +49,7 @@
 *
 * C function prototype:
 *
-* void _exception_enter(u32_t error_code, void *handler)
+* void _exception_enter(uint32_t error_code, void *handler)
 *
 */


@@ -127,20 +127,20 @@ struct task_state_segment _main_tss = {
 * In a special kernel page that, unlike all other kernel pages,
 * is marked present in the user page table.
 */
-.esp0 = (u32_t)&z_trampoline_stack_end
+.esp0 = (uint32_t)&z_trampoline_stack_end
 #endif
 };
 /* Special TSS for handling double-faults with a known good stack */
 Z_GENERIC_SECTION(.tss)
 struct task_state_segment _df_tss = {
-.esp = (u32_t)(_df_stack + sizeof(_df_stack)),
+.esp = (uint32_t)(_df_stack + sizeof(_df_stack)),
 .cs = CODE_SEG,
 .ds = DATA_SEG,
 .es = DATA_SEG,
 .ss = DATA_SEG,
-.eip = (u32_t)df_handler_top,
-.cr3 = (u32_t)&z_x86_kernel_ptables
+.eip = (uint32_t)df_handler_top,
+.cr3 = (uint32_t)&z_x86_kernel_ptables
 };
 static __used void df_handler_bottom(void)
@@ -149,8 +149,8 @@ static __used void df_handler_bottom(void)
 int reason = K_ERR_CPU_EXCEPTION;
 /* Restore the top half so it is runnable again */
-_df_tss.esp = (u32_t)(_df_stack + sizeof(_df_stack));
-_df_tss.eip = (u32_t)df_handler_top;
+_df_tss.esp = (uint32_t)(_df_stack + sizeof(_df_stack));
+_df_tss.eip = (uint32_t)df_handler_top;
 LOG_ERR("Double Fault");
 #ifdef CONFIG_THREAD_STACK_INFO
@@ -181,14 +181,14 @@ static FUNC_NORETURN __used void df_handler_top(void)
 _df_esf.eflags = _main_tss.eflags;
 /* Restore the main IA task to a runnable state */
-_main_tss.esp = (u32_t)(ARCH_THREAD_STACK_BUFFER(
+_main_tss.esp = (uint32_t)(ARCH_THREAD_STACK_BUFFER(
 z_interrupt_stacks[0]) + CONFIG_ISR_STACK_SIZE);
 _main_tss.cs = CODE_SEG;
 _main_tss.ds = DATA_SEG;
 _main_tss.es = DATA_SEG;
 _main_tss.ss = DATA_SEG;
-_main_tss.eip = (u32_t)df_handler_bottom;
-_main_tss.cr3 = (u32_t)&z_x86_kernel_ptables;
+_main_tss.eip = (uint32_t)df_handler_bottom;
+_main_tss.cr3 = (uint32_t)&z_x86_kernel_ptables;
 _main_tss.eflags = 0U;
 /* NT bit is set in EFLAGS so we will task switch back to _main_tss


@@ -47,7 +47,7 @@
 #include <kernel_internal.h>
 /* SSE control/status register default value (used by assembler code) */
-extern u32_t _sse_mxcsr_default_value;
+extern uint32_t _sse_mxcsr_default_value;
 /**
 *
@@ -190,7 +190,7 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
 /* Indicate thread requires floating point context saving */
-thread->base.user_options |= (u8_t)options;
+thread->base.user_options |= (uint8_t)options;
 /*
 * The current thread might not allow FP instructions, so clear CR0[TS]


@@ -169,7 +169,7 @@ static unsigned int priority_to_free_vector(unsigned int requested_priority)
 */
 static void *get_dynamic_stub(int stub_idx)
 {
-u32_t offset;
+uint32_t offset;
 /*
 * Because we want the sizes of the stubs to be consistent and minimized,
@@ -181,7 +181,7 @@ static void *get_dynamic_stub(int stub_idx)
 ((stub_idx / Z_DYN_STUB_PER_BLOCK) *
 Z_DYN_STUB_LONG_JMP_EXTRA_SIZE);
-return (void *)((u32_t)&z_dynamic_stubs_begin + offset);
+return (void *)((uint32_t)&z_dynamic_stubs_begin + offset);
 }
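The start of the offset computation falls outside this hunk; assuming the usual layout (fixed-size stubs, plus a longer jump at the end of every Z_DYN_STUB_PER_BLOCK-stub block), the arithmetic works out as in this hypothetical example:

/* Hypothetical sizes: 8-byte stubs, 32 stubs per block, 3 extra
 * bytes of long-jump overhead per completed block. For stub_idx = 70:
 *   base  = 70 * 8 = 560
 *   extra = (70 / 32) * 3 = 6
 *   stub address = z_dynamic_stubs_begin + 566
 */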
 extern const struct pseudo_descriptor z_x86_idt;
@@ -192,13 +192,13 @@ static void idt_vector_install(int vector, void *irq_handler)
 key = irq_lock();
 z_init_irq_gate(&z_x86_idt.entries[vector], CODE_SEG,
-(u32_t)irq_handler, 0);
+(uint32_t)irq_handler, 0);
 irq_unlock(key);
 }
 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter), void *parameter,
-u32_t flags)
+uint32_t flags)
 {
 int vector, stub_idx, key;
@@ -233,7 +233,7 @@ int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 *
 * @param stub_idx Index into the dyn_irq_list array
 */
-void z_x86_dynamic_irq_handler(u8_t stub_idx)
+void z_x86_dynamic_irq_handler(uint8_t stub_idx)
 {
 dyn_irq_list[stub_idx].handler(dyn_irq_list[stub_idx].param);
 }


@@ -23,13 +23,13 @@
 * for when z_swap() switches to it for the first time.
 */
 struct _x86_initial_frame {
-u32_t swap_retval;
-u32_t ebp;
-u32_t ebx;
-u32_t esi;
-u32_t edi;
+uint32_t swap_retval;
+uint32_t ebp;
+uint32_t ebx;
+uint32_t esi;
+uint32_t edi;
 void *thread_entry;
-u32_t eflags;
+uint32_t eflags;
 k_thread_entry_t entry;
 void *p1;
 void *p2;
@@ -101,7 +101,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 initial_frame->p3 = parameter3;
 initial_frame->eflags = EFLAGS_INITIAL;
 #ifdef _THREAD_WRAPPER_REQUIRED
-initial_frame->edi = (u32_t)swap_entry;
+initial_frame->edi = (uint32_t)swap_entry;
 initial_frame->thread_entry = z_x86_thread_entry_wrapper;
 #else
 initial_frame->thread_entry = swap_entry;


@@ -279,8 +279,8 @@ z_x86_user_string_nlen_fixup:
 /* FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
 * void *p1, void *p2, void *p3,
-* u32_t stack_end,
-* u32_t stack_start)
+* uint32_t stack_end,
+* uint32_t stack_start)
 *
 * A one-way trip to userspace.
 */


@@ -18,28 +18,28 @@
 * The symbol is weak so that boards/SoC files can override.
 */
-__weak u8_t x86_cpu_loapics[] = { 0, 1, 2, 3 };
+__weak uint8_t x86_cpu_loapics[] = { 0, 1, 2, 3 };
 extern char x86_ap_start[]; /* AP entry point in locore.S */
-extern u8_t _exception_stack[];
-extern u8_t _exception_stack1[];
-extern u8_t _exception_stack2[];
-extern u8_t _exception_stack3[];
+extern uint8_t _exception_stack[];
+extern uint8_t _exception_stack1[];
+extern uint8_t _exception_stack2[];
+extern uint8_t _exception_stack3[];
 #ifdef CONFIG_X86_KPTI
-extern u8_t z_x86_trampoline_stack[];
-extern u8_t z_x86_trampoline_stack1[];
-extern u8_t z_x86_trampoline_stack2[];
-extern u8_t z_x86_trampoline_stack3[];
+extern uint8_t z_x86_trampoline_stack[];
+extern uint8_t z_x86_trampoline_stack1[];
+extern uint8_t z_x86_trampoline_stack2[];
+extern uint8_t z_x86_trampoline_stack3[];
 #endif /* CONFIG_X86_KPTI */
 Z_GENERIC_SECTION(.tss)
 struct x86_tss64 tss0 = {
 #ifdef CONFIG_X86_KPTI
-.ist2 = (u64_t) z_x86_trampoline_stack + Z_X86_TRAMPOLINE_STACK_SIZE,
+.ist2 = (uint64_t) z_x86_trampoline_stack + Z_X86_TRAMPOLINE_STACK_SIZE,
 #endif
-.ist7 = (u64_t) _exception_stack + CONFIG_EXCEPTION_STACK_SIZE,
+.ist7 = (uint64_t) _exception_stack + CONFIG_EXCEPTION_STACK_SIZE,
 .iomapb = 0xFFFF,
 .cpu = &(_kernel.cpus[0])
 };
@@ -48,9 +48,9 @@ struct x86_tss64 tss0 = {
 Z_GENERIC_SECTION(.tss)
 struct x86_tss64 tss1 = {
 #ifdef CONFIG_X86_KPTI
-.ist2 = (u64_t) z_x86_trampoline_stack1 + Z_X86_TRAMPOLINE_STACK_SIZE,
+.ist2 = (uint64_t) z_x86_trampoline_stack1 + Z_X86_TRAMPOLINE_STACK_SIZE,
 #endif
-.ist7 = (u64_t) _exception_stack1 + CONFIG_EXCEPTION_STACK_SIZE,
+.ist7 = (uint64_t) _exception_stack1 + CONFIG_EXCEPTION_STACK_SIZE,
 .iomapb = 0xFFFF,
 .cpu = &(_kernel.cpus[1])
 };
@@ -60,9 +60,9 @@ struct x86_tss64 tss1 = {
 Z_GENERIC_SECTION(.tss)
 struct x86_tss64 tss2 = {
 #ifdef CONFIG_X86_KPTI
-.ist2 = (u64_t) z_x86_trampoline_stack2 + Z_X86_TRAMPOLINE_STACK_SIZE,
+.ist2 = (uint64_t) z_x86_trampoline_stack2 + Z_X86_TRAMPOLINE_STACK_SIZE,
 #endif
-.ist7 = (u64_t) _exception_stack2 + CONFIG_EXCEPTION_STACK_SIZE,
+.ist7 = (uint64_t) _exception_stack2 + CONFIG_EXCEPTION_STACK_SIZE,
 .iomapb = 0xFFFF,
 .cpu = &(_kernel.cpus[2])
 };
@@ -72,9 +72,9 @@ struct x86_tss64 tss2 = {
 Z_GENERIC_SECTION(.tss)
 struct x86_tss64 tss3 = {
 #ifdef CONFIG_X86_KPTI
-.ist2 = (u64_t) z_x86_trampoline_stack3 + Z_X86_TRAMPOLINE_STACK_SIZE,
+.ist2 = (uint64_t) z_x86_trampoline_stack3 + Z_X86_TRAMPOLINE_STACK_SIZE,
 #endif
-.ist7 = (u64_t) _exception_stack3 + CONFIG_EXCEPTION_STACK_SIZE,
+.ist7 = (uint64_t) _exception_stack3 + CONFIG_EXCEPTION_STACK_SIZE,
 .iomapb = 0xFFFF,
 .cpu = &(_kernel.cpus[3])
 };
@@ -86,7 +86,7 @@ struct x86_cpuboot x86_cpuboot[] = {
 {
 .tr = X86_KERNEL_CPU0_TR,
 .gs_base = &tss0,
-.sp = (u64_t) (z_interrupt_stacks[0] + CONFIG_ISR_STACK_SIZE +
+.sp = (uint64_t) (z_interrupt_stacks[0] + CONFIG_ISR_STACK_SIZE +
 ARCH_THREAD_STACK_RESERVED),
 .fn = z_x86_prep_c,
 #ifdef CONFIG_X86_MMU
@@ -121,10 +121,10 @@ struct x86_cpuboot x86_cpuboot[] = {
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 arch_cpustart_t fn, void *arg)
 {
-u8_t vector = ((unsigned long) x86_ap_start) >> 12;
-u8_t apic_id = x86_cpu_loapics[cpu_num];
-x86_cpuboot[cpu_num].sp = (u64_t) Z_THREAD_STACK_BUFFER(stack) + sz;
+uint8_t vector = ((unsigned long) x86_ap_start) >> 12;
+uint8_t apic_id = x86_cpu_loapics[cpu_num];
+x86_cpuboot[cpu_num].sp = (uint64_t) Z_THREAD_STACK_BUFFER(stack) + sz;
 x86_cpuboot[cpu_num].fn = fn;
 x86_cpuboot[cpu_num].arg = arg;
 #ifdef CONFIG_X86_MMU
@@ -151,10 +151,10 @@ FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot)
 #ifdef CONFIG_USERSPACE
 /* Set landing site for 'syscall' instruction */
-z_x86_msr_write(X86_LSTAR_MSR, (u64_t)z_x86_syscall_entry_stub);
+z_x86_msr_write(X86_LSTAR_MSR, (uint64_t)z_x86_syscall_entry_stub);
 /* Set segment descriptors for syscall privilege transitions */
-z_x86_msr_write(X86_STAR_MSR, (u64_t)X86_STAR_UPPER << 32);
+z_x86_msr_write(X86_STAR_MSR, (uint64_t)X86_STAR_UPPER << 32);
 /* Mask applied to RFLAGS when making a syscall */
 z_x86_msr_write(X86_FMASK_MSR, EFLAGS_SYSCALL);
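In arch_start_cpu() above, the startup vector is simply the real-mode entry address shifted right by 12: an INIT/SIPI startup IPI carries a 4 KiB page number, so x86_ap_start must be page-aligned and sit below 1 MiB. Worked example with a hypothetical link address:

/* If x86_ap_start were linked at 0x8000, then
 *   vector = 0x8000 >> 12 = 0x8
 * and the AP begins real-mode execution at vector << 12 = 0x8000.
 */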


@@ -70,9 +70,9 @@ static int allocate_vector(unsigned int priority)
 */
 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-void (*func)(void *arg), void *arg, u32_t flags)
+void (*func)(void *arg), void *arg, uint32_t flags)
 {
-u32_t key;
+uint32_t key;
 int vector;
 __ASSERT(irq <= CONFIG_MAX_IRQ_LINES, "IRQ %u out of range", irq);


@@ -42,10 +42,10 @@ void z_multiboot_init(struct multiboot_info *info)
 if ((info->flags & MULTIBOOT_INFO_FLAGS_MMAP) &&
 (x86_memmap_source < X86_MEMMAP_SOURCE_MULTIBOOT_MMAP)) {
-u32_t address = info->mmap_addr;
+uint32_t address = info->mmap_addr;
 struct multiboot_mmap *mmap;
 int index = 0;
-u32_t type;
+uint32_t type;
 while ((address < (info->mmap_addr + info->mmap_length)) &&
 (index < CONFIG_X86_MEMMAP_ENTRIES)) {
@@ -124,9 +124,9 @@ static int multiboot_framebuf_init(struct device *dev)
 * the pitch and adjust the start address center our canvas.
 */
-u16_t adj_x;
-u16_t adj_y;
-u32_t *buffer;
+uint16_t adj_x;
+uint16_t adj_y;
+uint32_t *buffer;
 adj_x = info->fb_width - CONFIG_MULTIBOOT_FRAMEBUF_X;
 adj_y = info->fb_height - CONFIG_MULTIBOOT_FRAMEBUF_Y;
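The two subtractions above compute the total horizontal and vertical slack between the discovered framebuffer and the configured canvas. Centering typically splits that slack in half on each side; a hedged sketch of the idea (the actual assignment is outside this hunk, and the parameterization is an assumption):

/* 32 bpp: pitch is in bytes, so divide by 4 for a uint32_t pointer */
static uint32_t *center_canvas(uint32_t *fb, uint32_t pitch_bytes,
			       uint16_t adj_x, uint16_t adj_y)
{
	return fb + (adj_y / 2) * (pitch_bytes / 4) /* rows of slack above */
		  + (adj_x / 2);                    /* pixels of slack left */
}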


@@ -32,7 +32,7 @@
 * Helper function for exported configuration functions. Configuration access
 * ain't atomic, so spinlock to keep drivers from clobbering each other.
 */
-static void pcie_conf(pcie_bdf_t bdf, unsigned int reg, bool write, u32_t *data)
+static void pcie_conf(pcie_bdf_t bdf, unsigned int reg, bool write, uint32_t *data)
 {
 static struct k_spinlock lock;
 k_spinlock_key_t k;
@@ -56,15 +56,15 @@ static void pcie_conf(pcie_bdf_t bdf, unsigned int reg, bool write, u32_t *data)
 /* these functions are explained in include/drivers/pcie/pcie.h */
-u32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
+uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
 {
-u32_t data;
+uint32_t data;
 pcie_conf(bdf, reg, false, &data);
 return data;
 }
-void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, u32_t data)
+void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data)
 {
 pcie_conf(bdf, reg, true, &data);
 }
@@ -73,13 +73,13 @@ void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, u32_t data)
 /* these functions are explained in include/drivers/pcie/msi.h */
-u32_t pcie_msi_map(unsigned int irq)
+uint32_t pcie_msi_map(unsigned int irq)
 {
 ARG_UNUSED(irq);
 return 0xFEE00000U; /* standard delivery to BSP local APIC */
 }
-u16_t pcie_msi_mdr(unsigned int irq)
+uint16_t pcie_msi_mdr(unsigned int irq)
 {
 unsigned char vector = Z_IRQ_TO_INTERRUPT_VECTOR(irq);
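For x86 MSI, the message address 0xFEE00000 returned by pcie_msi_map() targets the local APIC, and the message data register carries the interrupt vector in its low byte (fixed delivery and edge trigger leave the remaining bits zero). That is why pcie_msi_mdr() starts from the IRQ's allocated IDT vector; a sketch of the layout:

static inline uint16_t msi_mdr_sketch(unsigned char vector)
{
	/* bits 7:0 = vector; delivery mode 000 (fixed) in bits 10:8 */
	return (uint16_t)vector;
}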


@@ -22,7 +22,7 @@
 static inline void cold_reboot(void)
 {
-u8_t reset_value = X86_RST_CNT_CPU_RST | X86_RST_CNT_SYS_RST |
+uint8_t reset_value = X86_RST_CNT_CPU_RST | X86_RST_CNT_SYS_RST |
 X86_RST_CNT_FULL_RST;
 sys_out8(reset_value, X86_RST_CNT_REG);
 }
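X86_RST_CNT_REG is the conventional PC reset-control register at I/O port 0xCF9. Assuming the usual bit assignments (bit 1 selects a hard reset, bit 2 triggers it, bit 3 requests a full power cycle), the three flags above collapse to a single write:

static inline void cold_reboot_sketch(void)
{
	sys_out8(0x0E, 0xCF9); /* 0x0E = FULL_RST | CPU_RST | SYS_RST */
}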


@@ -25,9 +25,9 @@
 #define CPUID_SPEC_CTRL_IBRS BIT(26)
 #if defined(CONFIG_DISABLE_SSBD) || defined(CONFIG_ENABLE_EXTENDED_IBRS)
-static u32_t cpuid_extended_features(void)
+static uint32_t cpuid_extended_features(void)
 {
-u32_t eax, ebx, ecx = 0U, edx;
+uint32_t eax, ebx, ecx = 0U, edx;
 if (__get_cpuid(CPUID_EXTENDED_FEATURES_LVL,
 &eax, &ebx, &ecx, &edx) == 0) {
@@ -41,8 +41,8 @@ static int spec_ctrl_init(struct device *dev)
 {
 ARG_UNUSED(dev);
-u32_t enable_bits = 0U;
-u32_t cpuid7 = cpuid_extended_features();
+uint32_t enable_bits = 0U;
+uint32_t cpuid7 = cpuid_extended_features();
 #ifdef CONFIG_DISABLE_SSBD
 if ((cpuid7 & CPUID_SPEC_CTRL_SSBD) != 0U) {
@@ -55,7 +55,7 @@ static int spec_ctrl_init(struct device *dev)
 }
 #endif
 if (enable_bits != 0U) {
-u64_t cur = z_x86_msr_read(X86_SPEC_CTRL_MSR);
+uint64_t cur = z_x86_msr_read(X86_SPEC_CTRL_MSR);
 z_x86_msr_write(X86_SPEC_CTRL_MSR,
 cur | enable_bits);
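The gate-then-set pattern above matters twice over: a mitigation bit may only be written to the speculation-control MSR when CPUID leaf 7 advertises it, and the MSR is updated read-modify-write so bits firmware already enabled survive. A self-contained sketch of the CPUID probe using the GCC/Clang helper:

#include <cpuid.h>
#include <stdint.h>

static uint32_t cpuid7_edx(void)
{
	uint32_t eax, ebx, ecx, edx;

	/* leaf 7, subleaf 0: extended feature flags live in EDX */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) == 0) {
		return 0; /* leaf not supported on this CPU */
	}
	return edx;
}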


@@ -68,7 +68,7 @@ void z_x86_swap_update_page_tables(struct k_thread *incoming)
 FUNC_NORETURN static void drop_to_user(k_thread_entry_t user_entry,
 void *p1, void *p2, void *p3)
 {
-u32_t stack_end;
+uint32_t stack_end;
 /* Transition will reset stack pointer to initial, discarding
 * any old context since this is a one-way operation


@@ -76,7 +76,7 @@ MMU_BOOT_REGION(&__kernel_ram_start, &__kernel_ram_size,
 */
 #ifdef CONFIG_X86_64
-static inline void pml4e_update_pdpt(u64_t *pml4e, struct x86_mmu_pdpt *pdpt)
+static inline void pml4e_update_pdpt(uint64_t *pml4e, struct x86_mmu_pdpt *pdpt)
 {
 uintptr_t pdpt_addr = (uintptr_t)pdpt;
@@ -85,7 +85,7 @@ static inline void pml4e_update_pdpt(u64_t *pml4e, struct x86_mmu_pdpt *pdpt)
 }
 #endif /* CONFIG_X86_64 */
-static inline void pdpte_update_pd(u64_t *pdpte, struct x86_mmu_pd *pd)
+static inline void pdpte_update_pd(uint64_t *pdpte, struct x86_mmu_pd *pd)
 {
 uintptr_t pd_addr = (uintptr_t)pd;
@@ -96,7 +96,7 @@ static inline void pdpte_update_pd(u64_t *pdpte, struct x86_mmu_pd *pd)
 (pd_addr & Z_X86_MMU_PDPTE_PD_MASK));
 }
-static inline void pde_update_pt(u64_t *pde, struct x86_mmu_pt *pt)
+static inline void pde_update_pt(uint64_t *pde, struct x86_mmu_pt *pt)
 {
 uintptr_t pt_addr = (uintptr_t)pt;
@@ -106,7 +106,7 @@ static inline void pde_update_pt(u64_t *pde, struct x86_mmu_pt *pt)
 (pt_addr & Z_X86_MMU_PDE_PT_MASK));
 }
-static inline void pte_update_addr(u64_t *pte, uintptr_t addr)
+static inline void pte_update_addr(uint64_t *pte, uintptr_t addr)
 {
 *pte = ((*pte & ~Z_X86_MMU_PTE_ADDR_MASK) |
 (addr & Z_X86_MMU_PTE_ADDR_MASK));
@@ -121,7 +121,7 @@ static inline void pte_update_addr(u64_t *pte, uintptr_t addr)
 * Not trying to capture every flag, just the most interesting stuff,
 * Present, write, XD, user, in typically encountered combinations.
 */
-static bool dump_entry_flags(const char *name, u64_t flags)
+static bool dump_entry_flags(const char *name, uint64_t flags)
 {
 if ((flags & Z_X86_MMU_P) == 0) {
 LOG_ERR("%s: Non-present", name);
@@ -140,7 +140,7 @@ static bool dump_entry_flags(const char *name, u64_t flags)
 void z_x86_dump_mmu_flags(struct x86_page_tables *ptables, uintptr_t addr)
 {
-u64_t entry;
+uint64_t entry;
 #ifdef CONFIG_X86_64
 entry = *z_x86_get_pml4e(ptables, addr);
@@ -174,7 +174,7 @@ void z_x86_dump_mmu_flags(struct x86_page_tables *ptables, uintptr_t addr)
 }
 }
-static char get_entry_code(u64_t value)
+static char get_entry_code(uint64_t value)
 {
 char ret;
@@ -209,7 +209,7 @@ static char get_entry_code(u64_t value)
 return ret;
 }
-static void print_entries(u64_t entries_array[], size_t count)
+static void print_entries(uint64_t entries_array[], size_t count)
 {
 int column = 0;
@@ -245,7 +245,7 @@ static void z_x86_dump_pd(struct x86_mmu_pd *pd, uintptr_t base, int index)
 for (int i = 0; i < Z_X86_NUM_PD_ENTRIES; i++) {
 struct x86_mmu_pt *pt;
-u64_t pde = pd->entry[i];
+uint64_t pde = pd->entry[i];
 if (((pde & Z_X86_MMU_P) == 0) || ((pde & Z_X86_MMU_PS) != 0)) {
 /* Skip non-present, or 2MB directory entries, there's
@@ -268,7 +268,7 @@ static void z_x86_dump_pdpt(struct x86_mmu_pdpt *pdpt, uintptr_t base,
 for (int i = 0; i < Z_X86_NUM_PDPT_ENTRIES; i++) {
 struct x86_mmu_pd *pd;
-u64_t pdpte = pdpt->entry[i];
+uint64_t pdpte = pdpt->entry[i];
 if ((pdpte & Z_X86_MMU_P) == 0) {
 continue;
@@ -292,7 +292,7 @@ static void z_x86_dump_pml4(struct x86_mmu_pml4 *pml4)
 for (int i = 0; i < Z_X86_NUM_PML4_ENTRIES; i++) {
 struct x86_mmu_pdpt *pdpt;
-u64_t pml4e = pml4->entry[i];
+uint64_t pml4e = pml4->entry[i];
 if ((pml4e & Z_X86_MMU_P) == 0) {
 continue;
@@ -316,7 +316,7 @@ void z_x86_dump_page_tables(struct x86_page_tables *ptables)
 #endif
 void z_x86_mmu_get_flags(struct x86_page_tables *ptables, void *addr,
-u64_t *pde_flags, u64_t *pte_flags)
+uint64_t *pde_flags, uint64_t *pte_flags)
 {
 *pde_flags = *z_x86_get_pde(ptables, (uintptr_t)addr) &
 ~Z_X86_MMU_PDE_PT_MASK;
@@ -358,7 +358,7 @@ static int x86_mmu_validate_pt(struct x86_mmu_pt *pt, uintptr_t addr,
 int ret = 0;
 while (true) {
-u64_t pte = *z_x86_pt_get_pte(pt, pos);
+uint64_t pte = *z_x86_pt_get_pte(pt, pos);
 if ((pte & Z_X86_MMU_P) == 0 || (pte & Z_X86_MMU_US) == 0 ||
 (write && (pte & Z_X86_MMU_RW) == 0)) {
@@ -387,7 +387,7 @@ static int x86_mmu_validate_pd(struct x86_mmu_pd *pd, uintptr_t addr,
 size_t to_examine;
 while (remaining) {
-u64_t pde = *z_x86_pd_get_pde(pd, pos);
+uint64_t pde = *z_x86_pd_get_pde(pd, pos);
 if ((pde & Z_X86_MMU_P) == 0 || (pde & Z_X86_MMU_US) == 0 ||
 (write && (pde & Z_X86_MMU_RW) == 0)) {
@@ -429,7 +429,7 @@ static int x86_mmu_validate_pdpt(struct x86_mmu_pdpt *pdpt, uintptr_t addr,
 size_t to_examine;
 while (remaining) {
-u64_t pdpte = *z_x86_pdpt_get_pdpte(pdpt, pos);
+uint64_t pdpte = *z_x86_pdpt_get_pdpte(pdpt, pos);
 if ((pdpte & Z_X86_MMU_P) == 0) {
 /* Non-present */
@@ -478,7 +478,7 @@ static int x86_mmu_validate_pml4(struct x86_mmu_pml4 *pml4, uintptr_t addr,
 size_t to_examine;
 while (remaining) {
-u64_t pml4e = *z_x86_pml4_get_pml4e(pml4, pos);
+uint64_t pml4e = *z_x86_pml4_get_pml4e(pml4, pos);
 struct x86_mmu_pdpt *pdpt;
 if ((pml4e & Z_X86_MMU_P) == 0 || (pml4e & Z_X86_MMU_US) == 0 ||
@@ -553,7 +553,7 @@ static inline void tlb_flush_page(void *addr)
 Z_X86_MMU_PCD)
 void z_x86_mmu_set_flags(struct x86_page_tables *ptables, void *ptr,
-size_t size, u64_t flags, u64_t mask, bool flush)
+size_t size, uint64_t flags, uint64_t mask, bool flush)
 {
 uintptr_t addr = (uintptr_t)ptr;
@@ -572,13 +572,13 @@ void z_x86_mmu_set_flags(struct x86_page_tables *ptables, void *ptr,
 * modified.
 */
 while (size != 0) {
-u64_t *pte;
-u64_t *pde;
-u64_t *pdpte;
+uint64_t *pte;
+uint64_t *pde;
+uint64_t *pdpte;
 #ifdef CONFIG_X86_64
-u64_t *pml4e;
+uint64_t *pml4e;
 #endif
-u64_t cur_flags = flags;
+uint64_t cur_flags = flags;
 bool exec = (flags & Z_X86_MMU_XD) == 0;
 #ifdef CONFIG_X86_64
@@ -678,7 +678,7 @@ static inline bool is_within_system_ram(uintptr_t addr)
 /* Ignored bit posiition at all levels */
 #define IGNORED BIT64(11)
-static void maybe_clear_xd(u64_t *entry, bool exec)
+static void maybe_clear_xd(uint64_t *entry, bool exec)
 {
 /* Execute disable bit needs special handling, we should only set it at
 * intermediate levels if ALL containing pages have XD set (instead of
@@ -696,17 +696,17 @@ static void maybe_clear_xd(u64_t *entry, bool exec)
 }
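The rule the comment above describes: an XD (execute-disable) bit at a directory level blankets the entire subtree, so an intermediate entry may keep XD only while every page beneath it is non-executable. The moment one executable page is mapped underneath, XD must come off the parent, and execute permission is then enforced per-PTE. A sketch of that reduced to its core:

static void clear_xd_sketch(uint64_t *entry, bool exec)
{
	if (exec) {
		*entry &= ~Z_X86_MMU_XD; /* one executable child clears XD here */
	}
}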
 static void add_mmu_region_page(struct x86_page_tables *ptables,
-uintptr_t addr, u64_t flags, bool user_table)
+uintptr_t addr, uint64_t flags, bool user_table)
 {
 #ifdef CONFIG_X86_64
-u64_t *pml4e;
+uint64_t *pml4e;
 #endif
 struct x86_mmu_pdpt *pdpt;
-u64_t *pdpte;
+uint64_t *pdpte;
 struct x86_mmu_pd *pd;
-u64_t *pde;
+uint64_t *pde;
 struct x86_mmu_pt *pt;
-u64_t *pte;
+uint64_t *pte;
 bool exec = (flags & Z_X86_MMU_XD) == 0;
 #ifdef CONFIG_X86_KPTI
@@ -795,7 +795,7 @@ static void add_mmu_region(struct x86_page_tables *ptables,
 bool user_table)
 {
 size_t size;
-u64_t flags;
+uint64_t flags;
 uintptr_t addr;
 __ASSERT((rgn->address & MMU_PAGE_MASK) == 0U,
@@ -818,7 +818,7 @@
 }
-void z_x86_add_mmu_region(uintptr_t addr, size_t size, u64_t flags)
+void z_x86_add_mmu_region(uintptr_t addr, size_t size, uint64_t flags)
 {
 struct mmu_region rgn = {
 .address = addr,
@@ -883,7 +883,7 @@ static uintptr_t thread_pdpt_create(uintptr_t pages,
 uintptr_t pos = pages, phys_addr = Z_X86_PDPT_START;
 for (int i = 0; i < Z_X86_NUM_PDPT; i++, phys_addr += Z_X86_PDPT_AREA) {
-u64_t *pml4e;
+uint64_t *pml4e;
 struct x86_mmu_pdpt *master_pdpt, *dest_pdpt;
 /* obtain master PDPT tables for the address range and copy
@@ -911,7 +911,7 @@ static uintptr_t thread_pd_create(uintptr_t pages,
 uintptr_t pos = pages, phys_addr = Z_X86_PD_START;
 for (int i = 0; i < Z_X86_NUM_PD; i++, phys_addr += Z_X86_PD_AREA) {
-u64_t *pdpte;
+uint64_t *pdpte;
 struct x86_mmu_pd *master_pd, *dest_pd;
 /* Obtain PD in master tables for the address range and copy
@@ -941,7 +941,7 @@ static uintptr_t thread_pt_create(uintptr_t pages,
 uintptr_t pos = pages, phys_addr = Z_X86_PT_START;
 for (int i = 0; i < Z_X86_NUM_PT; i++, phys_addr += Z_X86_PT_AREA) {
-u64_t *pde;
+uint64_t *pde;
 struct x86_mmu_pt *master_pt, *dest_pt;
 /* Same as we did with the directories, obtain PT in master
@@ -1067,7 +1067,7 @@ static void reset_mem_partition(struct x86_page_tables *thread_ptables,
 __ASSERT((size & MMU_PAGE_MASK) == 0U, "unaligned size provided");
 while (size != 0) {
-u64_t *thread_pte, *master_pte;
+uint64_t *thread_pte, *master_pte;
 thread_pte = z_x86_get_pte(thread_ptables, addr);
 master_pte = z_x86_get_pte(&USER_PTABLES, addr);
@@ -1082,8 +1082,8 @@
 static void apply_mem_partition(struct x86_page_tables *ptables,
 struct k_mem_partition *partition)
 {
-u64_t x86_attr;
-u64_t mask;
+uint64_t x86_attr;
+uint64_t mask;
 if (IS_ENABLED(CONFIG_X86_KPTI)) {
 x86_attr = partition->attr | Z_X86_MMU_P;
@@ -1171,7 +1171,7 @@ void z_x86_thread_pt_init(struct k_thread *thread)
 * configuration applied.
 */
 void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
-u32_t partition_id)
+uint32_t partition_id)
 {
 sys_dnode_t *node, *next_node;
@@ -1230,7 +1230,7 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
 }
 void arch_mem_domain_partition_add(struct k_mem_domain *domain,
-u32_t partition_id)
+uint32_t partition_id)
 {
 sys_dnode_t *node, *next_node;


@@ -20,9 +20,9 @@ extern char _locore_start[], _locore_end[];
 struct x86_cpuboot {
 volatile int ready; /* CPU has started */
-u16_t tr; /* selector for task register */
+uint16_t tr; /* selector for task register */
 struct x86_tss64 *gs_base; /* Base address for GS segment */
-u64_t sp; /* initial stack pointer */
+uint64_t sp; /* initial stack pointer */
 arch_cpustart_t fn; /* kernel entry function */
 void *arg; /* argument for above function */
 #ifdef CONFIG_X86_MMU
@@ -32,7 +32,7 @@ struct x86_cpuboot {
 typedef struct x86_cpuboot x86_cpuboot_t;
-extern u8_t x86_cpu_loapics[]; /* CPU logical ID -> local APIC ID */
+extern uint8_t x86_cpu_loapics[]; /* CPU logical ID -> local APIC ID */
 #endif /* _ASMLANGUAGE */


@@ -87,7 +87,7 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf);
 * @param cs Code segment of faulting context
 * @return true if addr/size region is not within the thread stack
 */
-bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs);
+bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs);
 #endif /* CONFIG_THREAD_STACK_INFO */
 #ifdef CONFIG_USERSPACE


@@ -27,7 +27,7 @@
 * @return N/A
 */
-void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
+void z_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
 {
 __ASSERT(prio < XCHAL_EXCM_LEVEL + 1,
 "invalid priority %d! values must be less than %d\n",
@@ -41,7 +41,7 @@ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 #ifndef CONFIG_MULTI_LEVEL_INTERRUPTS
 int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter),
-void *parameter, u32_t flags)
+void *parameter, uint32_t flags)
 {
 ARG_UNUSED(flags);
 ARG_UNUSED(priority);
@@ -52,7 +52,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 #else /* !CONFIG_MULTI_LEVEL_INTERRUPTS */
 int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 void (*routine)(void *parameter),
-void *parameter, u32_t flags)
+void *parameter, uint32_t flags)
 {
 return z_soc_irq_connect_dynamic(irq, priority, routine, parameter,
 flags);


@@ -139,7 +139,7 @@ static inline unsigned int get_bits(int offset, int num_bits, unsigned int val)
 #define DEF_INT_C_HANDLER(l) \
 void *xtensa_int##l##_c(void *interrupted_stack) \
 { \
-u32_t irqs, intenable, m; \
+uint32_t irqs, intenable, m; \
 __asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
 __asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
 irqs &= intenable; \
@@ -188,7 +188,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
 bsa[BSA_PC_OFF/4] += 3;
 } else {
-u32_t ps = bsa[BSA_PS_OFF/4];
+uint32_t ps = bsa[BSA_PS_OFF/4];
 __asm__ volatile("rsr.excvaddr %0" : "=r"(vaddr));
@@ -219,7 +219,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
 int z_xtensa_irq_is_enabled(unsigned int irq)
 {
-u32_t ie;
+uint32_t ie;
 __asm__ volatile("rsr.intenable %0" : "=r"(ie));


@@ -126,7 +126,7 @@
 static int pmod_mux_init(struct device *device)
 {
-volatile u32_t *mux_regs = (u32_t *)(PMODMUX_BASE_ADDR);
+volatile uint32_t *mux_regs = (uint32_t *)(PMODMUX_BASE_ADDR);
 mux_regs[SPI_MAP_CTRL] = SPI_MAP_CTRL_DEFAULT;
 mux_regs[UART_MAP_CTRL] = UART_MAP_CTRL_DEFAULT;


@@ -25,7 +25,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 #if DT_NODE_HAS_COMPAT_STATUS(DT_NODELABEL(flexcomm0), nxp_lpc_usart, okay) && CONFIG_SERIAL
 /* USART0 RX, TX */
-const u32_t port0_pin0_config = (
+const uint32_t port0_pin0_config = (
 IOCON_PIO_FUNC1 |
 IOCON_PIO_MODE_INACT |
 IOCON_PIO_INV_DI |
@@ -35,7 +35,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 IOCON_PIO_OPENDRAIN_DI
 );
-const u32_t port0_pin1_config = (
+const uint32_t port0_pin1_config = (
 IOCON_PIO_FUNC1 |
 IOCON_PIO_MODE_INACT |
 IOCON_PIO_INV_DI |
@@ -51,7 +51,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 #endif
 #if DT_NODE_HAS_STATUS(DT_NODELABEL(gpio0), okay)
-const u32_t port0_pin29_config = (
+const uint32_t port0_pin29_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |
@@ -62,7 +62,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 pinmux_pin_set(port0, 29, port0_pin29_config);
-const u32_t port0_pin24_config = (
+const uint32_t port0_pin24_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -71,7 +71,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 );
 pinmux_pin_set(port0, 24, port0_pin24_config);
-const u32_t port0_pin31_config = (
+const uint32_t port0_pin31_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |
@@ -81,7 +81,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 );
 pinmux_pin_set(port0, 31, port0_pin31_config);
-const u32_t port0_pin4_config = (
+const uint32_t port0_pin4_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |
@@ -94,7 +94,7 @@ static int lpcxpresso_54114_pinmux_init(struct device *dev)
 #endif
 #if DT_NODE_HAS_STATUS(DT_NODELABEL(gpio1), okay)
-const u32_t port1_pin10_config = (
+const uint32_t port1_pin10_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |


@@ -25,7 +25,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(sw0), gpios, pin)
 /* Wakeup button */
-const u32_t sw0_config = (
+const uint32_t sw0_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -37,7 +37,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(sw1), gpios, pin)
 /* USR button */
-const u32_t sw1_config = (
+const uint32_t sw1_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -49,7 +49,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(sw2), gpios, pin)
 /* ISP button */
-const u32_t sw2_config = (
+const uint32_t sw2_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -61,7 +61,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(led0), gpios, pin)
 /* Red LED */
-const u32_t led0_config = (
+const uint32_t led0_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -73,7 +73,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(led1), gpios, pin)
 /* Green LED */
-const u32_t led1_config = (
+const uint32_t led1_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -85,7 +85,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(led2), gpios, pin)
 /* Blue LED */
-const u32_t led2_config = (
+const uint32_t led2_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_INV_DI |
 IOCON_PIO_DIGITAL_EN |
@@ -97,7 +97,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 #if DT_NODE_HAS_COMPAT_STATUS(DT_NODELABEL(flexcomm0), nxp_lpc_usart, okay) && CONFIG_SERIAL
 /* USART0 RX, TX */
-const u32_t port0_pin29_config = (
+const uint32_t port0_pin29_config = (
 IOCON_PIO_FUNC1 |
 IOCON_PIO_MODE_INACT |
 IOCON_PIO_INV_DI |
@@ -105,7 +105,7 @@ static int lpcxpresso_55s16_pinmux_init(struct device *dev)
 IOCON_PIO_SLEW_STANDARD |
 IOCON_PIO_OPENDRAIN_DI
 );
-const u32_t port0_pin30_config = (
+const uint32_t port0_pin30_config = (
 IOCON_PIO_FUNC1 |
 IOCON_PIO_MODE_INACT |
 IOCON_PIO_INV_DI |


@@ -25,7 +25,7 @@ static int lpcxpresso_55s69_pinmux_init(struct device *dev)
 #if DT_NODE_HAS_COMPAT_STATUS(DT_NODELABEL(flexcomm0), nxp_lpc_usart, okay) && CONFIG_SERIAL
 /* USART0 RX, TX */
-const u32_t port0_pin29_config = (
+const uint32_t port0_pin29_config = (
 IOCON_PIO_FUNC1 |
 IOCON_PIO_MODE_INACT |
 IOCON_PIO_INV_DI |
@@ -34,7 +34,7 @@ static int lpcxpresso_55s69_pinmux_init(struct device *dev)
 IOCON_PIO_OPENDRAIN_DI
 );
-const u32_t port0_pin30_config = (
+const uint32_t port0_pin30_config = (
 IOCON_PIO_FUNC1 |
 IOCON_PIO_MODE_INACT |
 IOCON_PIO_INV_DI |
@@ -49,7 +49,7 @@ static int lpcxpresso_55s69_pinmux_init(struct device *dev)
 #endif
 #if DT_PHA_HAS_CELL(DT_ALIAS(sw0), gpios, pin)
-const u32_t sw0_config = (
+const uint32_t sw0_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |
@@ -63,7 +63,7 @@ static int lpcxpresso_55s69_pinmux_init(struct device *dev)
 #if DT_PHA_HAS_CELL(DT_ALIAS(sw1), gpios, pin)
-const u32_t sw1_config = (
+const uint32_t sw1_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |
@@ -76,7 +76,7 @@ static int lpcxpresso_55s69_pinmux_init(struct device *dev)
 #endif
 #if DT_PHA_HAS_CELL(DT_ALIAS(sw2), gpios, pin)
-const u32_t sw2_config = (
+const uint32_t sw2_config = (
 IOCON_PIO_FUNC0 |
 IOCON_PIO_MODE_PULLUP |
 IOCON_PIO_INV_DI |


@@ -29,17 +29,17 @@ static gpio_pin_config_t enet_gpio_config = {
 */
 static void mimxrt1050_evk_usdhc_pinmux(
-u16_t nusdhc, bool init,
-u32_t speed, u32_t strength)
+uint16_t nusdhc, bool init,
+uint32_t speed, uint32_t strength)
 {
-u32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+uint32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_HYS_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUS(1) |
 IOMUXC_SW_PAD_CTL_PAD_DSE(strength);
-u32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+uint32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_HYS_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUS(0) |


@@ -31,10 +31,10 @@ static gpio_pin_config_t enet_gpio_config = {
 *Hyst. Enable Field: Hysteresis Enabled.
 */
-static void mimxrt1060_evk_usdhc_pinmux(u16_t nusdhc, bool init, u32_t speed,
-u32_t strength)
+static void mimxrt1060_evk_usdhc_pinmux(uint16_t nusdhc, bool init, uint32_t speed,
+uint32_t strength)
 {
-u32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+uint32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
@@ -42,7 +42,7 @@ static void mimxrt1060_evk_usdhc_pinmux(u16_t nusdhc, bool init, u32_t speed,
 IOMUXC_SW_PAD_CTL_PAD_PUS(1) |
 IOMUXC_SW_PAD_CTL_PAD_DSE(strength);
-u32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+uint32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_HYS_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUS(0) |


@@ -31,10 +31,10 @@ static gpio_pin_config_t enet_gpio_config = {
 *Hyst. Enable Field: Hysteresis Enabled.
 */
-static void mimxrt1064_evk_usdhc_pinmux(u16_t nusdhc, bool init, u32_t speed,
-u32_t strength)
+static void mimxrt1064_evk_usdhc_pinmux(uint16_t nusdhc, bool init, uint32_t speed,
+uint32_t strength)
 {
-u32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+uint32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
@@ -42,7 +42,7 @@ static void mimxrt1064_evk_usdhc_pinmux(u16_t nusdhc, bool init, u32_t speed,
 IOMUXC_SW_PAD_CTL_PAD_PUS(1) |
 IOMUXC_SW_PAD_CTL_PAD_DSE(strength);
-u32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+uint32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 IOMUXC_SW_PAD_CTL_PAD_HYS_MASK |
 IOMUXC_SW_PAD_CTL_PAD_PUS(0) |


@@ -114,9 +114,9 @@ enum { kSerialFlash_1Pad = 1,
 struct flexspi_lut_seq_t {
-	u8_t seqNum;
-	u8_t seqId;
-	u16_t reserved;
+	uint8_t seqNum;
+	uint8_t seqId;
+	uint16_t reserved;
 };
@@ -129,54 +129,54 @@ enum { kDeviceConfigCmdType_Generic,
 struct flexspi_mem_config_t {
-	u32_t tag;
-	u32_t version;
-	u32_t reserved0;
-	u8_t readSampleClkSrc;
-	u8_t csHoldTime;
-	u8_t csSetupTime;
-	u8_t columnAddressWidth;
-	u8_t deviceModeCfgEnable;
-	u8_t deviceModeType;
-	u16_t waitTimeCfgCommands;
+	uint32_t tag;
+	uint32_t version;
+	uint32_t reserved0;
+	uint8_t readSampleClkSrc;
+	uint8_t csHoldTime;
+	uint8_t csSetupTime;
+	uint8_t columnAddressWidth;
+	uint8_t deviceModeCfgEnable;
+	uint8_t deviceModeType;
+	uint16_t waitTimeCfgCommands;
 	struct flexspi_lut_seq_t deviceModeSeq;
-	u32_t deviceModeArg;
-	u8_t configCmdEnable;
-	u8_t configModeType[3];
+	uint32_t deviceModeArg;
+	uint8_t configCmdEnable;
+	uint8_t configModeType[3];
 	struct flexspi_lut_seq_t configCmdSeqs[3];
-	u32_t reserved1;
-	u32_t configCmdArgs[3];
-	u32_t reserved2;
-	u32_t controllerMiscOption;
-	u8_t deviceType;
-	u8_t sflashPadType;
-	u8_t serialClkFreq;
-	u8_t lutCustomSeqEnable;
-	u32_t reserved3[2];
-	u32_t sflashA1Size;
-	u32_t sflashA2Size;
-	u32_t sflashB1Size;
-	u32_t sflashB2Size;
-	u32_t csPadSettingOverride;
-	u32_t sclkPadSettingOverride;
-	u32_t dataPadSettingOverride;
-	u32_t dqsPadSettingOverride;
-	u32_t timeoutInMs;
-	u32_t commandInterval;
-	u16_t dataValidTime[2];
-	u16_t busyOffset;
-	u16_t busyBitPolarity;
-	u32_t lookupTable[64];
+	uint32_t reserved1;
+	uint32_t configCmdArgs[3];
+	uint32_t reserved2;
+	uint32_t controllerMiscOption;
+	uint8_t deviceType;
+	uint8_t sflashPadType;
+	uint8_t serialClkFreq;
+	uint8_t lutCustomSeqEnable;
+	uint32_t reserved3[2];
+	uint32_t sflashA1Size;
+	uint32_t sflashA2Size;
+	uint32_t sflashB1Size;
+	uint32_t sflashB2Size;
+	uint32_t csPadSettingOverride;
+	uint32_t sclkPadSettingOverride;
+	uint32_t dataPadSettingOverride;
+	uint32_t dqsPadSettingOverride;
+	uint32_t timeoutInMs;
+	uint32_t commandInterval;
+	uint16_t dataValidTime[2];
+	uint16_t busyOffset;
+	uint16_t busyBitPolarity;
+	uint32_t lookupTable[64];
 	struct flexspi_lut_seq_t lutCustomSeq[12];
-	u32_t reserved4[4];
+	uint32_t reserved4[4];
 };
@@ -217,17 +217,17 @@ struct flexspi_mem_config_t {
 struct flexspi_nor_config_t {
 	struct flexspi_mem_config_t memConfig;
-	u32_t pageSize;
-	u32_t sectorSize;
-	u8_t ipcmdSerialClkFreq;
-	u8_t isUniformBlockSize;
-	u8_t reserved0[2];
-	u8_t serialNorType;
-	u8_t needExitNoCmdMode;
-	u8_t halfClkForNonReadCmd;
-	u8_t needRestoreNoCmdMode;
-	u32_t blockSize;
-	u32_t reserve2[11];
+	uint32_t pageSize;
+	uint32_t sectorSize;
+	uint8_t ipcmdSerialClkFreq;
+	uint8_t isUniformBlockSize;
+	uint8_t reserved0[2];
+	uint8_t serialNorType;
+	uint8_t needExitNoCmdMode;
+	uint8_t halfClkForNonReadCmd;
+	uint8_t needRestoreNoCmdMode;
+	uint32_t blockSize;
+	uint32_t reserve2[11];
 };
 #ifdef __cplusplus


@@ -16,7 +16,7 @@ __attribute__((section(".boot_hdr.dcd_data")))
 #pragma location = ".boot_hdr.dcd_data"
 #endif
-const u8_t dcd_data[] = {
+const uint8_t dcd_data[] = {
 	0xD2,
 	0x04, 0x30,
@@ -289,6 +289,6 @@ const u8_t dcd_data[] = {
 };
 #else
-const u8_t dcd_data[] = { 0x00 };
+const uint8_t dcd_data[] = { 0x00 };
 #endif
 #endif


@@ -29,17 +29,17 @@ static gpio_pin_config_t enet_gpio_config = {
  */
 static void mm_swiftio_usdhc_pinmux(
-	u16_t nusdhc, bool init,
-	u32_t speed, u32_t strength)
+	uint16_t nusdhc, bool init,
+	uint32_t speed, uint32_t strength)
 {
-	u32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+	uint32_t cmd_data = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 		IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 		IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
 		IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
 		IOMUXC_SW_PAD_CTL_PAD_HYS_MASK |
 		IOMUXC_SW_PAD_CTL_PAD_PUS(1) |
 		IOMUXC_SW_PAD_CTL_PAD_DSE(strength);
-	u32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
+	uint32_t clk = IOMUXC_SW_PAD_CTL_PAD_SPEED(speed) |
 		IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
 		IOMUXC_SW_PAD_CTL_PAD_HYS_MASK |
 		IOMUXC_SW_PAD_CTL_PAD_PUS(0) |


@@ -117,9 +117,9 @@
  */
 static void arm_mps2_pinmux_defaults(void)
 {
-	u32_t gpio_0 = 0U;
-	u32_t gpio_1 = 0U;
-	u32_t gpio_2 = 0U;
+	uint32_t gpio_0 = 0U;
+	uint32_t gpio_1 = 0U;
+	uint32_t gpio_2 = 0U;
 	/* Set GPIO Alternate Functions */


@@ -117,9 +117,9 @@
  */
 static void arm_mps2_pinmux_defaults(void)
 {
-	u32_t gpio_0 = 0;
-	u32_t gpio_1 = 0;
-	u32_t gpio_2 = 0;
+	uint32_t gpio_0 = 0;
+	uint32_t gpio_1 = 0;
+	uint32_t gpio_2 = 0;
 	/* Set GPIO Alternate Functions */


@@ -56,8 +56,8 @@ LOG_MODULE_REGISTER(board_control, CONFIG_BOARD_NRF9160DK_LOG_LEVEL);
  */
 __packed struct pin_config {
-	u8_t pin;
-	u8_t val;
+	uint8_t pin;
+	uint8_t val;
 };
 /* The following tables specify the configuration of each pin based on the
@@ -183,7 +183,7 @@ static int pins_configure(struct device *port, const struct pin_config cfg[],
 		 * so configure the pin as output with the proper initial
 		 * state.
 		 */
-		u32_t flag = (cfg[i].val ? GPIO_OUTPUT_LOW
+		uint32_t flag = (cfg[i].val ? GPIO_OUTPUT_LOW
 					 : GPIO_OUTPUT_HIGH);
 		err = gpio_pin_configure(port, cfg[i].pin, flag);
 		if (err) {
@@ -198,9 +198,9 @@ static int pins_configure(struct device *port, const struct pin_config cfg[],
 }
 static void chip_reset(struct device *gpio,
-		       struct gpio_callback *cb, u32_t pins)
+		       struct gpio_callback *cb, uint32_t pins)
 {
-	const u32_t stamp = k_cycle_get_32();
+	const uint32_t stamp = k_cycle_get_32();
 	printk("GPIO reset line asserted, device reset.\n");
 	printk("Bye @ cycle32 %u\n", stamp);
@@ -208,7 +208,7 @@ static void chip_reset(struct device *gpio,
 	NVIC_SystemReset();
 }
-static void reset_pin_wait_low(struct device *port, u32_t pin)
+static void reset_pin_wait_low(struct device *port, uint32_t pin)
 {
 	int val;
@@ -221,7 +221,7 @@ static void reset_pin_wait_low(struct device *port, u32_t pin)
 static int reset_pin_configure(struct device *p0, struct device *p1)
 {
 	int err;
-	u32_t pin = 0;
+	uint32_t pin = 0;
 	struct device *port = NULL;
 	static struct gpio_callback gpio_ctx;


@@ -22,19 +22,19 @@
 static struct k_spinlock lock;
-static u32_t last_count;
-static u32_t counter_sub(u32_t a, u32_t b)
+static uint32_t last_count;
+static uint32_t counter_sub(uint32_t a, uint32_t b)
 {
 	return (a - b) & COUNTER_MAX;
 }
-static void set_comparator(u32_t cyc)
+static void set_comparator(uint32_t cyc)
 {
 	nrf_timer_cc_set(TIMER, 0, cyc & COUNTER_MAX);
 }
-static u32_t counter(void)
+static uint32_t counter(void)
 {
 	nrf_timer_task_trigger(TIMER, nrf_timer_capture_task_get(1));
@@ -47,13 +47,13 @@ void timer0_nrf_isr(void *arg)
 	TIMER->EVENTS_COMPARE[0] = 0;
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	u32_t t = counter();
-	u32_t dticks = counter_sub(t, last_count) / CYC_PER_TICK;
+	uint32_t t = counter();
+	uint32_t dticks = counter_sub(t, last_count) / CYC_PER_TICK;
 	last_count += dticks * CYC_PER_TICK;
 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
-		u32_t next = last_count + CYC_PER_TICK;
+		uint32_t next = last_count + CYC_PER_TICK;
 		/* As below: we're guaranteed to get an interrupt as
 		 * long as it's set two or more cycles in the future
@@ -103,16 +103,16 @@ int z_clock_driver_init(struct device *device)
 	return 0;
 }
-void z_clock_set_timeout(s32_t ticks, bool idle)
+void z_clock_set_timeout(int32_t ticks, bool idle)
 {
 	ARG_UNUSED(idle);
 #ifdef CONFIG_TICKLESS_KERNEL
 	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
-	ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
+	ticks = MAX(MIN(ticks - 1, (int32_t)MAX_TICKS), 0);
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	u32_t cyc, dt, t = counter();
+	uint32_t cyc, dt, t = counter();
 	bool zli_fixup = IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS);
 	/* Round up to next tick boundary */
@@ -164,23 +164,23 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
 #endif /* CONFIG_TICKLESS_KERNEL */
 }
-u32_t z_clock_elapsed(void)
+uint32_t z_clock_elapsed(void)
 {
 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
 		return 0;
 	}
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	u32_t ret = counter_sub(counter(), last_count) / CYC_PER_TICK;
+	uint32_t ret = counter_sub(counter(), last_count) / CYC_PER_TICK;
 	k_spin_unlock(&lock, key);
 	return ret;
 }
-u32_t z_timer_cycle_get_32(void)
+uint32_t z_timer_cycle_get_32(void)
 {
 	k_spinlock_key_t key = k_spin_lock(&lock);
-	u32_t ret = counter_sub(counter(), last_count) + last_count;
+	uint32_t ret = counter_sub(counter(), last_count) + last_count;
 	k_spin_unlock(&lock, key);
 	return ret;


@@ -13,7 +13,7 @@
 struct pwr_ctrl_cfg {
 	const char *port;
-	u32_t pin;
+	uint32_t pin;
 };
 static int pwr_ctrl_init(struct device *dev)


@@ -94,8 +94,8 @@
  */
 static void arm_v2m_beetle_pinmux_defaults(void)
 {
-	u32_t gpio_0 = 0U;
-	u32_t gpio_1 = 0U;
+	uint32_t gpio_0 = 0U;
+	uint32_t gpio_1 = 0U;
 	/* Set GPIO Alternate Functions */


@@ -37,7 +37,7 @@ static void arm_musca_pinmux_defaults(void)
  */
 static void arm_musca_pinmux_defaults(void)
 {
-	volatile u32_t *scc = (u32_t *)DT_REG_ADDR(DT_INST(0, arm_scc));
+	volatile uint32_t *scc = (uint32_t *)DT_REG_ADDR(DT_INST(0, arm_scc));
 	/* there is only altfunc1, so steer all alt funcs to use 1 */
 	scc[IOMUX_ALTF1_INSEL] = 0xffff;


@@ -37,7 +37,7 @@ static void arm_musca_b1_pinmux_defaults(void)
  */
 static void arm_musca_b1_pinmux_defaults(void)
 {
-	volatile u32_t *scc = (u32_t *)DT_REG_ADDR(DT_INST(0, arm_scc));
+	volatile uint32_t *scc = (uint32_t *)DT_REG_ADDR(DT_INST(0, arm_scc));
 	/* there is only altfunc1, so steer all alt funcs to use 1 */
 	scc[IOMUX_ALTF1_INSEL] = 0xffff;


@@ -17,7 +17,7 @@ extern "C" {
 void posix_isr_declare(unsigned int irq_p, int flags, void isr_p(void *),
 		       void *isr_param_p);
-void posix_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags);
+void posix_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags);
 /**
  * Configure a static interrupt.


@@ -146,16 +146,16 @@ void cmd_read_option_value(const char *str, void *dest, const char type,
 		endptr = (char *)str + strlen(str);
 		break;
 	case 'u':
-		*(u32_t *)dest = strtoul(str, &endptr, 0);
+		*(uint32_t *)dest = strtoul(str, &endptr, 0);
 		break;
 	case 'U':
-		*(u64_t *)dest = strtoull(str, &endptr, 0);
+		*(uint64_t *)dest = strtoull(str, &endptr, 0);
 		break;
 	case 'i':
-		*(s32_t *)dest = strtol(str, &endptr, 0);
+		*(int32_t *)dest = strtol(str, &endptr, 0);
 		break;
 	case 'I':
-		*(s64_t *)dest = strtoll(str, &endptr, 0);
+		*(int64_t *)dest = strtoll(str, &endptr, 0);
 		break;
 	case 'd':
 		*(double *)dest = strtod(str, &endptr);
@@ -201,16 +201,16 @@ void cmd_args_set_defaults(struct args_struct_t args_struct[])
 		*(char **)args_struct[count].dest = NULL;
 		break;
 	case 'u':
-		*(u32_t *)args_struct[count].dest = UINT32_MAX;
+		*(uint32_t *)args_struct[count].dest = UINT32_MAX;
 		break;
 	case 'U':
-		*(u64_t *)args_struct[count].dest = UINT64_MAX;
+		*(uint64_t *)args_struct[count].dest = UINT64_MAX;
 		break;
 	case 'i':
-		*(s32_t *)args_struct[count].dest = INT32_MAX;
+		*(int32_t *)args_struct[count].dest = INT32_MAX;
 		break;
 	case 'I':
-		*(s64_t *)args_struct[count].dest = INT64_MAX;
+		*(int64_t *)args_struct[count].dest = INT64_MAX;
 		break;
 	case 'd':
 		*(double *)args_struct[count].dest = NAN;


@@ -24,14 +24,14 @@
 #include <sys/util.h>
-static u64_t simu_time; /* The actual time as known by the HW models */
-static u64_t end_of_time = NEVER; /* When will this device stop */
+static uint64_t simu_time; /* The actual time as known by the HW models */
+static uint64_t end_of_time = NEVER; /* When will this device stop */
 /* List of HW model timers: */
-extern u64_t hw_timer_timer; /* When should this timer_model be called */
-extern u64_t irq_ctrl_timer;
+extern uint64_t hw_timer_timer; /* When should this timer_model be called */
+extern uint64_t irq_ctrl_timer;
 #ifdef CONFIG_HAS_SDL
-extern u64_t sdl_event_timer;
+extern uint64_t sdl_event_timer;
 #endif
 static enum {
@@ -44,7 +44,7 @@ static enum {
 	NONE
 } next_timer_index = NONE;
-static u64_t *Timer_list[NUMBER_OF_TIMERS] = {
+static uint64_t *Timer_list[NUMBER_OF_TIMERS] = {
 	&hw_timer_timer,
 	&irq_ctrl_timer,
 #ifdef CONFIG_HAS_SDL
@@ -52,7 +52,7 @@ static u64_t *Timer_list[NUMBER_OF_TIMERS] = {
 #endif
 };
-static u64_t next_timer_time;
+static uint64_t next_timer_time;
 /* Have we received a SIGTERM or SIGINT */
 static volatile sig_atomic_t signaled_end;
@@ -169,7 +169,7 @@ void hwm_main_loop(void)
 /**
  * Set the simulated time when the process will stop
  */
-void hwm_set_end_of_time(u64_t new_end_of_time)
+void hwm_set_end_of_time(uint64_t new_end_of_time)
 {
 	end_of_time = new_end_of_time;
 }
@@ -177,12 +177,12 @@ void hwm_set_end_of_time(u64_t new_end_of_time)
 /**
  * Return the current time as known by the device
  */
-u64_t hwm_get_time(void)
+uint64_t hwm_get_time(void)
 {
 	return simu_time;
 }
-u64_t posix_get_hw_cycle(void)
+uint64_t posix_get_hw_cycle(void)
 {
 	return hwm_get_time();
 }


@@ -19,8 +19,8 @@ extern "C" {
 void hwm_main_loop(void);
 void hwm_init(void);
 void hwm_cleanup(void);
-void hwm_set_end_of_time(u64_t new_end_of_time);
-u64_t hwm_get_time(void);
+void hwm_set_end_of_time(uint64_t new_end_of_time);
+uint64_t hwm_get_time(void);
 void hwm_find_next_timer(void);
 #ifdef __cplusplus


@@ -16,11 +16,11 @@
 #include "posix_soc.h"
 #include "zephyr/types.h"
-u64_t irq_ctrl_timer = NEVER;
-static u64_t irq_status; /* pending interrupts */
-static u64_t irq_premask; /* interrupts before the mask */
+uint64_t irq_ctrl_timer = NEVER;
+static uint64_t irq_status; /* pending interrupts */
+static uint64_t irq_premask; /* interrupts before the mask */
 /*
  * Mask of which interrupts will actually cause the cpu to vector into its
@@ -30,7 +30,7 @@ static u64_t irq_premask; /* interrupts before the mask */
  * If the irq_mask enables and interrupt pending in irq_premask, it will cause
  * the controller to raise the interrupt immediately
  */
-static u64_t irq_mask;
+static uint64_t irq_mask;
 /*
  * Interrupts lock/disable. When set, interrupts are registered
@@ -40,7 +40,7 @@ static u64_t irq_mask;
 static bool irqs_locked;
 static bool lock_ignore; /* For the hard fake IRQ, temporarily ignore lock */
-static u8_t irq_prio[N_IRQS]; /* Priority of each interrupt */
+static uint8_t irq_prio[N_IRQS]; /* Priority of each interrupt */
 /* note that prio = 0 == highest, prio=255 == lowest */
 static int currently_running_prio = 256; /* 255 is the lowest prio interrupt */
@@ -77,7 +77,7 @@ void hw_irq_ctrl_prio_set(unsigned int irq, unsigned int prio)
 	irq_prio[irq] = prio;
 }
-u8_t hw_irq_ctrl_get_prio(unsigned int irq)
+uint8_t hw_irq_ctrl_get_prio(unsigned int irq)
 {
 	return irq_prio[irq];
 }
@@ -94,14 +94,14 @@ int hw_irq_ctrl_get_highest_prio_irq(void)
 		return -1;
 	}
-	u64_t irq_status = hw_irq_ctrl_get_irq_status();
+	uint64_t irq_status = hw_irq_ctrl_get_irq_status();
 	int winner = -1;
 	int winner_prio = 256;
 	while (irq_status != 0U) {
 		int irq_nbr = find_lsb_set(irq_status) - 1;
-		irq_status &= ~((u64_t) 1 << irq_nbr);
+		irq_status &= ~((uint64_t) 1 << irq_nbr);
 		if ((winner_prio > (int)irq_prio[irq_nbr])
 		    && (currently_running_prio > (int)irq_prio[irq_nbr])) {
 			winner = irq_nbr;
@@ -112,14 +112,14 @@ int hw_irq_ctrl_get_highest_prio_irq(void)
 }
-u32_t hw_irq_ctrl_get_current_lock(void)
+uint32_t hw_irq_ctrl_get_current_lock(void)
 {
 	return irqs_locked;
 }
-u32_t hw_irq_ctrl_change_lock(u32_t new_lock)
+uint32_t hw_irq_ctrl_change_lock(uint32_t new_lock)
 {
-	u32_t previous_lock = irqs_locked;
+	uint32_t previous_lock = irqs_locked;
 	irqs_locked = new_lock;
@@ -150,23 +150,23 @@ void hw_irq_ctrl_clear_all_irqs(void)
 void hw_irq_ctrl_disable_irq(unsigned int irq)
 {
-	irq_mask &= ~((u64_t)1<<irq);
+	irq_mask &= ~((uint64_t)1<<irq);
 }
 int hw_irq_ctrl_is_irq_enabled(unsigned int irq)
 {
-	return (irq_mask & ((u64_t)1 << irq))?1:0;
+	return (irq_mask & ((uint64_t)1 << irq))?1:0;
 }
-u64_t hw_irq_ctrl_get_irq_mask(void)
+uint64_t hw_irq_ctrl_get_irq_mask(void)
 {
 	return irq_mask;
 }
 void hw_irq_ctrl_clear_irq(unsigned int irq)
 {
-	irq_status &= ~((u64_t)1<<irq);
-	irq_premask &= ~((u64_t)1<<irq);
+	irq_status &= ~((uint64_t)1<<irq);
+	irq_premask &= ~((uint64_t)1<<irq);
 }
@@ -180,8 +180,8 @@ void hw_irq_ctrl_clear_irq(unsigned int irq)
  */
 void hw_irq_ctrl_enable_irq(unsigned int irq)
 {
-	irq_mask |= ((u64_t)1<<irq);
-	if (irq_premask & ((u64_t)1<<irq)) { /* if IRQ is pending */
+	irq_mask |= ((uint64_t)1<<irq);
+	if (irq_premask & ((uint64_t)1<<irq)) { /* if IRQ is pending */
 		hw_irq_ctrl_raise_im_from_sw(irq);
 	}
 }
@@ -190,10 +190,10 @@ void hw_irq_ctrl_enable_irq(unsigned int irq)
 static inline void hw_irq_ctrl_irq_raise_prefix(unsigned int irq)
 {
 	if (irq < N_IRQS) {
-		irq_premask |= ((u64_t)1<<irq);
+		irq_premask |= ((uint64_t)1<<irq);
 		if (irq_mask & (1 << irq)) {
-			irq_status |= ((u64_t)1<<irq);
+			irq_status |= ((uint64_t)1<<irq);
 		}
 	} else if (irq == PHONY_HARD_IRQ) {
 		lock_ignore = true;


@@ -17,15 +17,15 @@
  * Return the (simulation) time in microseconds
  * where clock_type is one of RTC_CLOCK_*
  */
-u64_t native_rtc_gettime_us(int clock_type)
+uint64_t native_rtc_gettime_us(int clock_type)
 {
 	if (clock_type == RTC_CLOCK_BOOT) {
 		return hwm_get_time();
 	} else if (clock_type == RTC_CLOCK_REALTIME) { /* RTC_CLOCK_REALTIME */
 		return hwtimer_get_simu_rtc_time();
 	} else if (clock_type == RTC_CLOCK_PSEUDOHOSTREALTIME) {
-		u32_t nsec;
-		u64_t sec;
+		uint32_t nsec;
+		uint64_t sec;
 		hwtimer_get_pseudohost_rtc_time(&nsec, &sec);
 		return sec * 1000000UL + nsec / 1000U;
@@ -41,10 +41,10 @@ u64_t native_rtc_gettime_us(int clock_type)
 * get the simulation time split in nsec and seconds
 * where clock_type is one of RTC_CLOCK_*
 */
-void native_rtc_gettime(int clock_type, u32_t *nsec, u64_t *sec)
+void native_rtc_gettime(int clock_type, uint32_t *nsec, uint64_t *sec)
 {
 	if (clock_type == RTC_CLOCK_BOOT || clock_type == RTC_CLOCK_REALTIME) {
-		u64_t us = native_rtc_gettime_us(clock_type);
+		uint64_t us = native_rtc_gettime_us(clock_type);
 		*nsec = (us % 1000000UL) * 1000U;
 		*sec = us / 1000000UL;
 	} else { /* RTC_CLOCK_PSEUDOHOSTREALTIME */
@@ -57,7 +57,7 @@ void native_rtc_gettime(int clock_type, u32_t *nsec, u64_t *sec)
 * Note that this only affects the RTC_CLOCK_REALTIME and
 * RTC_CLOCK_PSEUDOHOSTREALTIME clocks.
 */
-void native_rtc_offset(s64_t delta_us)
+void native_rtc_offset(int64_t delta_us)
 {
 	hwtimer_adjust_rtc_offset(delta_us);
 }


@@ -41,7 +41,7 @@ extern "C" {
 *
 * @return Number of microseconds
 */
-u64_t native_rtc_gettime_us(int clock_type);
+uint64_t native_rtc_gettime_us(int clock_type);
 /**
  * @brief Get the value of a clock split in in nsec and seconds
@@ -50,7 +50,7 @@ u64_t native_rtc_gettime_us(int clock_type);
 * @param nsec Pointer to store the nanoseconds
 * @param nsec Pointer to store the seconds
 */
-void native_rtc_gettime(int clock_type, u32_t *nsec, u64_t *sec);
+void native_rtc_gettime(int clock_type, uint32_t *nsec, uint64_t *sec);
 /**
  * @brief Offset the real time clock by a number of microseconds.
@@ -60,7 +60,7 @@ void native_rtc_gettime(int clock_type, u32_t *nsec, u64_t *sec);
 * @param delta_us Number of microseconds to offset. The value is added to all
 * offsetable clocks.
 */
-void native_rtc_offset(s64_t delta_us);
+void native_rtc_offset(int64_t delta_us);
 /**
  * @brief Adjust the speed of the clock source by a multiplicative factor


@@ -11,7 +11,7 @@
 #include "soc.h"
 #include "hw_models_top.h"
-u64_t sdl_event_timer;
+uint64_t sdl_event_timer;
 static void sdl_handle_window_event(const SDL_Event *event)
 {
View file

@@ -42,7 +42,7 @@
  * Note: the caller has to allocate the destination buffer (at least 17 chars)
  */
 #include <stdio.h>
-static char *us_time_to_str(char *dest, u64_t time)
+static char *us_time_to_str(char *dest, uint64_t time)
 {
 	if (time != NEVER) {
 		unsigned int hour;
@@ -64,13 +64,13 @@ static char *us_time_to_str(char *dest, u64_t time)
 }
 #endif
-u64_t hw_timer_timer;
-u64_t hw_timer_tick_timer;
-u64_t hw_timer_awake_timer;
-static u64_t tick_p; /* Period of the ticker */
-static s64_t silent_ticks;
+uint64_t hw_timer_timer;
+uint64_t hw_timer_tick_timer;
+uint64_t hw_timer_awake_timer;
+static uint64_t tick_p; /* Period of the ticker */
+static int64_t silent_ticks;
 static bool real_time_mode =
 #if defined(CONFIG_NATIVE_POSIX_SLOWDOWN_TO_REAL_TIME)
@@ -85,7 +85,7 @@ static bool reset_rtc; /*"Reset" the RTC on boot*/
 * When this executable started running, this value shall not be changed after
 * boot
 */
-static u64_t boot_time;
+static uint64_t boot_time;
 /*
  * Ratio of the simulated clock to the real host time
@@ -103,21 +103,21 @@ static double clock_ratio = 1.0;
 *
 * This variable is only kept for debugging purposes
 */
-static s64_t last_drift_offset;
+static int64_t last_drift_offset;
 #endif
 /*
  * Offsets of the RTC relative to the hardware models simu_time
  * "simu_time" == simulated time which starts at 0 on boot
  */
-static s64_t rtc_offset;
+static int64_t rtc_offset;
 /* Last host/real time when the ratio was adjusted */
-static u64_t last_radj_rtime;
+static uint64_t last_radj_rtime;
 /* Last simulated time when the ratio was adjusted */
-static u64_t last_radj_stime;
-extern u64_t posix_get_hw_cycle(void);
+static uint64_t last_radj_stime;
+extern uint64_t posix_get_hw_cycle(void);
 void hwtimer_set_real_time_mode(bool new_rt)
 {
@@ -138,12 +138,12 @@ static inline void host_clock_gettime(struct timespec *tv)
 #endif
 }
-u64_t get_host_us_time(void)
+uint64_t get_host_us_time(void)
 {
 	struct timespec tv;
 	host_clock_gettime(&tv);
-	return (u64_t)tv.tv_sec * 1e6 + tv.tv_nsec / 1000;
+	return (uint64_t)tv.tv_sec * 1e6 + tv.tv_nsec / 1000;
 }
 void hwtimer_init(void)
@@ -159,10 +159,10 @@ void hwtimer_init(void)
 	}
 	if (!reset_rtc) {
 		struct timespec tv;
-		u64_t realhosttime;
+		uint64_t realhosttime;
 		clock_gettime(CLOCK_REALTIME, &tv);
-		realhosttime = (u64_t)tv.tv_sec * 1e6 + tv.tv_nsec / 1000;
+		realhosttime = (uint64_t)tv.tv_sec * 1e6 + tv.tv_nsec / 1000;
 		rtc_offset += realhosttime;
 	}
@@ -176,7 +176,7 @@ void hwtimer_cleanup(void)
 /**
  * Enable the HW timer tick interrupts with a period <period> in micoseconds
  */
-void hwtimer_enable(u64_t period)
+void hwtimer_enable(uint64_t period)
 {
 	tick_p = period;
 	hw_timer_tick_timer = hwm_get_time() + tick_p;
@@ -187,12 +187,12 @@ void hwtimer_enable(u64_t period)
 static void hwtimer_tick_timer_reached(void)
 {
 	if (real_time_mode) {
-		u64_t expected_rt = (hw_timer_tick_timer - last_radj_stime)
+		uint64_t expected_rt = (hw_timer_tick_timer - last_radj_stime)
 					/ clock_ratio
 					+ last_radj_rtime;
-		u64_t real_time = get_host_us_time();
-		s64_t diff = expected_rt - real_time;
+		uint64_t real_time = get_host_us_time();
+		int64_t diff = expected_rt - real_time;
 #if DEBUG_NP_TIMER
 		char es[30];
@@ -236,7 +236,7 @@ static void hwtimer_awake_timer_reached(void)
 void hwtimer_timer_reached(void)
 {
-	u64_t Now = hw_timer_timer;
+	uint64_t Now = hw_timer_timer;
 	if (hw_timer_awake_timer == Now) {
 		hwtimer_awake_timer_reached();
@@ -255,7 +255,7 @@ void hwtimer_timer_reached(void)
 *
 * This is meant for k_busy_wait() like functionality
 */
-void hwtimer_wake_in_time(u64_t time)
+void hwtimer_wake_in_time(uint64_t time)
 {
 	if (hw_timer_awake_timer > time) {
 		hw_timer_awake_timer = time;
@@ -267,12 +267,12 @@ void hwtimer_wake_in_time(u64_t time)
 * The kernel wants to skip the next sys_ticks tick interrupts
 * If sys_ticks == 0, the next interrupt will be raised.
 */
-void hwtimer_set_silent_ticks(s64_t sys_ticks)
+void hwtimer_set_silent_ticks(int64_t sys_ticks)
 {
 	silent_ticks = sys_ticks;
 }
-s64_t hwtimer_get_pending_silent_ticks(void)
+int64_t hwtimer_get_pending_silent_ticks(void)
 {
 	return silent_ticks;
 }
@@ -291,7 +291,7 @@ void hwtimer_reset_rtc(void)
 * Set a time offset (microseconds) of the RTC simulated time
 * Note: This should not be used after starting
 */
-void hwtimer_set_rtc_offset(s64_t offset)
+void hwtimer_set_rtc_offset(int64_t offset)
 {
 	rtc_offset = offset;
 }
@@ -308,7 +308,7 @@ void hwtimer_set_rt_ratio(double ratio)
 /**
  * Increase or decrease the RTC simulated time by offset_delta
  */
-void hwtimer_adjust_rtc_offset(s64_t offset_delta)
+void hwtimer_adjust_rtc_offset(int64_t offset_delta)
 {
 	rtc_offset += offset_delta;
 }
@@ -318,8 +318,8 @@ void hwtimer_adjust_rtc_offset(s64_t offset_delta)
 */
 void hwtimer_adjust_rt_ratio(double ratio_correction)
 {
-	u64_t current_stime = hwm_get_time();
-	s64_t s_diff = current_stime - last_radj_stime;
+	uint64_t current_stime = hwm_get_time();
+	int64_t s_diff = current_stime - last_radj_stime;
 	/* Accumulated real time drift time since last adjustment: */
 	last_radj_rtime += s_diff / clock_ratio;
@@ -327,7 +327,7 @@ void hwtimer_adjust_rt_ratio(double ratio_correction)
 #if DEBUG_NP_TIMER
 	char ct[30];
-	s64_t r_drift = (long double)(clock_ratio-1.0)/(clock_ratio)*s_diff;
+	int64_t r_drift = (long double)(clock_ratio-1.0)/(clock_ratio)*s_diff;
 	last_drift_offset += r_drift;
 	us_time_to_str(ct, current_stime);
@@ -351,7 +351,7 @@ void hwtimer_adjust_rt_ratio(double ratio_correction)
 /**
  * Return the current simulated RTC time in microseconds
  */
-s64_t hwtimer_get_simu_rtc_time(void)
+int64_t hwtimer_get_simu_rtc_time(void)
 {
 	return hwm_get_time() + rtc_offset;
 }
@@ -367,7 +367,7 @@ s64_t hwtimer_get_simu_rtc_time(void)
 * This will be the case in general if native_posix is not able to run at or
 * faster than real time.
 */
-void hwtimer_get_pseudohost_rtc_time(u32_t *nsec, u64_t *sec)
+void hwtimer_get_pseudohost_rtc_time(uint32_t *nsec, uint64_t *sec)
 {
 	/*
 	 * Note: long double has a 64bits mantissa in x86.
@@ -396,8 +396,8 @@ void hwtimer_get_pseudohost_rtc_time(u32_t *nsec, u64_t *sec)
 	host_clock_gettime(&tv);
-	u64_t rt_us = (u64_t)tv.tv_sec * 1000000ULL + tv.tv_nsec / 1000;
-	u32_t rt_ns = tv.tv_nsec % 1000;
+	uint64_t rt_us = (uint64_t)tv.tv_sec * 1000000ULL + tv.tv_nsec / 1000;
+	uint32_t rt_ns = tv.tv_nsec % 1000;
 	long double drt_us = (long double)rt_us - last_radj_rtime;
 	long double drt_ns = drt_us * 1000.0 + (long double)rt_ns;


@@ -18,19 +18,19 @@ void hwtimer_init(void);
 void hwtimer_cleanup(void);
 void hwtimer_set_real_time_mode(bool new_rt);
 void hwtimer_timer_reached(void);
-void hwtimer_wake_in_time(u64_t time);
-void hwtimer_set_silent_ticks(s64_t sys_ticks);
-void hwtimer_enable(u64_t period);
-s64_t hwtimer_get_pending_silent_ticks(void);
+void hwtimer_wake_in_time(uint64_t time);
+void hwtimer_set_silent_ticks(int64_t sys_ticks);
+void hwtimer_enable(uint64_t period);
+int64_t hwtimer_get_pending_silent_ticks(void);
 void hwtimer_reset_rtc(void);
-void hwtimer_set_rtc_offset(s64_t offset);
+void hwtimer_set_rtc_offset(int64_t offset);
 void hwtimer_set_rt_ratio(double ratio);
-void hwtimer_adjust_rtc_offset(s64_t offset_delta);
+void hwtimer_adjust_rtc_offset(int64_t offset_delta);
 void hwtimer_adjust_rt_ratio(double ratio_correction);
-s64_t hwtimer_get_simu_rtc_time(void);
-void hwtimer_get_pseudohost_rtc_time(u32_t *nsec, u64_t *sec);
+int64_t hwtimer_get_simu_rtc_time(void);
+void hwtimer_get_pseudohost_rtc_time(uint32_t *nsec, uint64_t *sec);
 #ifdef __cplusplus
 }

Some files were not shown because too many files have changed in this diff.