arch: native: Run Zephyr natively in a POSIX OS

A new arch (posix) which relies on pthreads to emulate the context
switching
A new soc for it (inf_clock) which emulates a CPU running at an
infinitely high clock (so when the CPU is awakened it runs to completion
in 0 time)
A new board, which provides a trivial system tick timer and
irq generation.

Origin: Original

Fixes #1891

Signed-off-by: Alberto Escolar Piedras <alpi@oticon.com>
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
Alberto Escolar Piedras 2017-10-03 16:31:55 +02:00 committed by Anas Nashif
parent 274ad46a84
commit 76f7644118
42 changed files with 2324 additions and 9 deletions

View file

@ -64,7 +64,6 @@ zephyr_include_directories(
${STDINCLUDE}
)
zephyr_compile_definitions(
KERNEL
__ZEPHYR__=1
@ -95,11 +94,13 @@ zephyr_compile_options(
-Wformat
-Wformat-security
-Wno-format-zero-length
-Wno-main
-imacros ${AUTOCONF_H}
-ffreestanding
-include ${AUTOCONF_H}
-Wno-main
${NOSTDINC_F}
)
zephyr_compile_options(
$<$<COMPILE_LANGUAGE:C>:-std=c99>
@ -114,13 +115,15 @@ zephyr_compile_options(
$<$<COMPILE_LANGUAGE:ASM>:-D_ASMLANGUAGE>
)
if(NOT CONFIG_NATIVE_APPLICATION)
zephyr_ld_options(
-nostartfiles
-nodefaultlibs
-nostdlib
-static
-no-pie
)
)
endif()
# ==========================================================================
#
@ -231,7 +234,6 @@ endif()
zephyr_cc_option_ifdef(CONFIG_DEBUG_SECTION_MISMATCH -fno-inline-functions-called-once)
zephyr_cc_option_ifdef(CONFIG_STACK_USAGE -fstack-usage)
zephyr_compile_options(-nostdinc)
zephyr_system_include_directories(${NOSTDINC})
# Force an error when things like SYS_INIT(foo, ...) occur with a missing header.
@ -248,9 +250,15 @@ if(IS_TEST)
endif()
set_ifndef(LINKERFLAGPREFIX -Wl)
if(NOT CONFIG_NATIVE_APPLICATION)
zephyr_ld_options(
${LINKERFLAGPREFIX},-X
${LINKERFLAGPREFIX},-N
)
endif()
zephyr_ld_options(
${LINKERFLAGPREFIX},--gc-sections
${LINKERFLAGPREFIX},--build-id=none
)
@ -502,7 +510,7 @@ add_custom_command(
# TODO: Remove duplication
COMMAND ${CMAKE_C_COMPILER}
-x assembler-with-cpp
-nostdinc
${NOSTDINC_F}
-undef
-MD -MF linker.cmd.dep -MT ${BASE_NAME}/linker.cmd
${ZEPHYR_INCLUDES}
@ -676,12 +684,20 @@ endif()
get_property(GKOF GLOBAL PROPERTY GENERATED_KERNEL_OBJECT_FILES)
get_property(GKSF GLOBAL PROPERTY GENERATED_KERNEL_SOURCE_FILES)
get_property(TOPT GLOBAL PROPERTY TOPT)
set_ifndef( TOPT -T)
# FIXME: Is there any way to get rid of empty_file.c?
add_executable( zephyr_prebuilt misc/empty_file.c)
target_link_libraries(zephyr_prebuilt -T${PROJECT_BINARY_DIR}/linker.cmd ${zephyr_lnk})
target_link_libraries(zephyr_prebuilt ${TOPT} ${PROJECT_BINARY_DIR}/linker.cmd ${zephyr_lnk})
set_property(TARGET zephyr_prebuilt PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker.cmd)
add_dependencies( zephyr_prebuilt linker_script offsets)
if(NOT CONFIG_NATIVE_APPLICATION)
set(NOSTDINC_F -nostdinc)
endif()
if(GKOF OR GKSF)
set(logical_target_for_zephyr_elf kernel_elf)
@ -694,7 +710,7 @@ if(GKOF OR GKSF)
${LINKER_SCRIPT_DEP}
COMMAND ${CMAKE_C_COMPILER}
-x assembler-with-cpp
-nostdinc
${NOSTDINC_F}
-undef
-MD -MF linker_pass2.cmd.dep -MT ${BASE_NAME}/linker_pass2.cmd
${ZEPHYR_INCLUDES}
@ -713,7 +729,7 @@ if(GKOF OR GKSF)
)
add_executable( kernel_elf misc/empty_file.c ${GKSF})
target_link_libraries(kernel_elf ${GKOF} -T${PROJECT_BINARY_DIR}/linker_pass2.cmd ${zephyr_lnk})
target_link_libraries(kernel_elf ${GKOF} ${TOPT} ${PROJECT_BINARY_DIR}/linker_pass2.cmd ${zephyr_lnk})
set_property(TARGET kernel_elf PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker_pass2.cmd)
add_dependencies( kernel_elf linker_pass2_script)
else()

View file

@ -34,6 +34,14 @@ config RISCV32
config XTENSA
bool "Xtensa architecture"
config ARCH_POSIX
bool "POSIX (native) architecture"
select ATOMIC_OPERATIONS_BUILTIN
select ARCH_HAS_CUSTOM_SWAP_TO_MAIN
select ARCH_HAS_CUSTOM_BUSY_WAIT
select ARCH_HAS_THREAD_ABORT
select NATIVE_APPLICATION
endchoice

37
arch/posix/CMakeLists.txt Normal file
View file

@ -0,0 +1,37 @@
zephyr_cc_option_ifdef(CONFIG_LTO -flto)
# Build the Zephyr "image" as a hosted (non-freestanding) 32-bit native binary
zephyr_compile_options(
-fno-freestanding
-Wno-undef
-Wno-implicit-function-declaration
-m32
-MMD
-MP
${TOOLCHAIN_C_FLAGS}
${ARCH_FLAG}
-include ${PROJECT_SOURCE_DIR}/arch/posix/include/posix_cheats.h
)
zephyr_compile_definitions(_POSIX_C_SOURCE=199309)
# Host libraries the POSIX arch needs: libdl and native pthreads
zephyr_ld_options(
-ldl
-pthread
-m32
)
# About the -include directive: The reason to do it this way, is because in this
# manner it is transparent to the application. Otherwise posix_cheats.h needs to
# be included in all the applications' files which define main( ), and in any
# app file which uses the pthreads like API provided by Zephyr
# ( include/posix/pthread.h / kernel/pthread.c ) [And any future API added to
# Zephyr which will clash with the native POSIX API] . It would also need to
# be included in a few zephyr kernel files.
add_subdirectory(soc)
add_subdirectory(core)
# Override the flag used with linker.cmd
# "-Wl,--just-symbols linker.cmd" instead of "-T linker.cmd"
set_property(GLOBAL PROPERTY TOPT -Wl,--just-symbols)

31
arch/posix/Kconfig Normal file
View file

@ -0,0 +1,31 @@
# Kconfig - General configuration options
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
choice
prompt "POSIX Configuration Selection"
depends on ARCH_POSIX
source "arch/posix/soc/*/Kconfig.soc"
endchoice
menu "POSIX (native) Options"
depends on ARCH_POSIX
config ARCH
default "posix"
config ARCH_DEFCONFIG
string
default "arch/posix/defconfig"
source "arch/posix/core/Kconfig"
source "arch/posix/soc/*/Kconfig"
endmenu

View file

@ -0,0 +1,9 @@
zephyr_library()
zephyr_library_compile_definitions(_POSIX_CHEATS_H)
zephyr_library_sources(
cpuhalt.c
fatal.c
posix_core.c
swap.c
thread.c
)

64
arch/posix/core/cpuhalt.c Normal file
View file

@ -0,0 +1,64 @@
/*
* Copyright (c) 2011-2015 Wind River Systems, Inc.
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file CPU power management code for POSIX
*
* This module provides an implementation of the architecture-specific
* k_cpu_idle() primitive required by the kernel idle loop component.
* It can be called within an implementation of _sys_power_save_idle(),
* which is provided for the kernel by the platform.
*
* The module also provides an implementation of k_cpu_atomic_idle(), which
* atomically re-enables interrupts and enters low power mode.
*
*/
#include "posix_core.h"
#include "posix_soc_if.h"
/**
*
* @brief Power save idle routine for IA-32
*
* This function will be called by the kernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the kernel when the
* '_sys_power_save_flag' variable is non-zero.
*
* This function is just a pass thru to the SOC one
*
* @return N/A
*/
void k_cpu_idle(void)
{
posix_irq_full_unlock();
posix_halt_cpu();
}
/**
 *
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * INTERNAL
 * The requirements for k_cpu_atomic_idle() are as follows:
 * 1) The enablement of interrupts and entering a low-power mode needs to be
 * atomic, i.e. there should be no period of time where interrupts are
 * enabled before the processor enters a low-power mode. See the comments
 * in k_lifo_get(), for example, of the race condition that
 * occurs if this requirement is not met.
 *
 * 2) After waking up from the low-power mode, the interrupt lockout state
 * must be restored as indicated in the 'imask' input parameter.
 *
 * This function is just a pass thru to the SOC one
 *
 * @param imask interrupt lockout state to restore after waking up
 *
 * @return N/A
 */
void k_cpu_atomic_idle(unsigned int imask)
{
/* The SOC layer performs the atomic unlock + halt and restores 'imask' */
posix_atomic_halt_cpu(imask);
}

124
arch/posix/core/fatal.c Normal file
View file

@ -0,0 +1,124 @@
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <arch/cpu.h>
#include <kernel_structs.h>
#include <misc/printk.h>
#include <inttypes.h>
#include "posix_soc_if.h"
/* Default exception stack frame: a recognizable dummy value, used when a
 * fatal error has no hardware-generated ESF
 */
const NANO_ESF _default_esf = {
0xdeadbaad
};
/**
 *
 * @brief Kernel fatal error handler
 *
 * This routine is called when a fatal error condition is detected
 *
 * The caller is expected to always provide a usable ESF. In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or call _Fault instead.
 *
 * @param reason the reason that the handler was called
 * @param esf pointer to the exception stack frame
 *
 * @return This function does not return.
 */
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf)
{
#ifdef CONFIG_PRINTK
/* Print a banner for the error class; the actual handling is delegated
 * to _SysFatalErrorHandler() below
 */
switch (reason) {
case _NANO_ERR_CPU_EXCEPTION:
case _NANO_ERR_SPURIOUS_INT:
/* no banner for these two classes */
break;
case _NANO_ERR_INVALID_TASK_EXIT:
printk("***** Invalid Exit Software Error! *****\n");
break;
case _NANO_ERR_ALLOCATION_FAIL:
printk("**** Kernel Allocation Failure! ****\n");
break;
case _NANO_ERR_KERNEL_OOPS:
printk("***** Kernel OOPS! *****\n");
break;
case _NANO_ERR_KERNEL_PANIC:
printk("***** Kernel Panic! *****\n");
break;
#ifdef CONFIG_STACK_SENTINEL
case _NANO_ERR_STACK_CHK_FAIL:
printk("***** Stack overflow *****\n");
break;
#endif
default:
printk("**** Unknown Fatal Error %u! ****\n", reason);
break;
}
#endif
/* Local prototype: the (weak) system handler is defined further down in
 * this file
 */
void _SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf);
_SysFatalErrorHandler(reason, esf);
/* NOTE(review): declared FUNC_NORETURN and relies on
 * _SysFatalErrorHandler() never returning — confirm; otherwise control
 * falls off the end here
 */
}
/**
 *
 * @brief Fatal error handler
 *
 * This routine implements the corrective action to be taken when the system
 * detects a fatal error.
 *
 * This sample implementation attempts to abort the current thread and allow
 * the system to continue executing, which may permit the system to continue
 * functioning with degraded capabilities.
 *
 * System designers may wish to enhance or substitute this sample
 * implementation to take other actions, such as logging error (or debug)
 * information to a persistent repository and/or rebooting the system.
 *
 * @param reason the fatal error reason
 * @param pEsf pointer to exception stack frame
 *
 * @return N/A
 */
FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
ARG_UNUSED(pEsf);
#ifdef CONFIG_STACK_SENTINEL
/* A stack overflow is not recoverable: stop the program */
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
goto hang_system;
}
#endif
if (reason == _NANO_ERR_KERNEL_PANIC) {
goto hang_system;
}
/* Faults in an ISR or in an essential thread cannot be survived either */
if (k_is_in_isr() || _is_thread_essential()) {
posix_print_error_and_exit(
"Fatal fault in %s! Stopping...\n",
k_is_in_isr() ? "ISR" : "essential thread");
}
/* Otherwise: abort only the offending thread and keep running */
printk("Fatal fault in thread %p! Aborting.\n", _current);
k_thread_abort(_current);
hang_system:
posix_print_error_and_exit(
"Stopped in _SysFatalErrorHandler()\n");
CODE_UNREACHABLE;
}

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Kernel structure member offset definition file
*
* This module is responsible for the generation of the absolute symbols whose
* value represents the member offsets for various kernel structures.
*
* All of the absolute symbols defined by this module will be present in the
* final kernel ELF image (due to the linker's reference to the _OffsetAbsSyms
* symbol).
*
* INTERNAL
* It is NOT necessary to define the offset for every member of a structure.
* Typically, only those members that are accessed by assembly language routines
* are defined; however, it doesn't hurt to define all fields for the sake of
* completeness.
*/
#include <gen_offset.h> /* located in kernel/include */
/* list of headers that define whose structure offsets will be generated */
#include <kernel_structs.h>
#include <kernel_offsets.h>
#ifdef CONFIG_DEBUG_INFO
GEN_OFFSET_SYM(_kernel_arch_t, isf);
#endif
#ifdef CONFIG_GDB_INFO
GEN_OFFSET_SYM(_thread_arch_t, esf);
#endif
#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
GEN_OFFSET_SYM(_thread_arch_t, excNestCount);
#endif
GEN_ABS_SYM_END

View file

@ -0,0 +1,544 @@
/*
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Here is where things actually happen for the POSIX arch
*
* We isolate all functions here, to ensure they can be compiled as
* independently as possible to the remainder of Zephyr to avoid name clashes
* as Zephyr does provide functions with the same names as the POSIX threads
* functions
*/
/**
* Principle of operation:
*
* The Zephyr OS and its app run as a set of native pthreads.
* The Zephyr OS only sees one of this thread executing at a time.
* Which is running is controlled using {cond|mtx}_threads and
* currently_allowed_thread.
*
* The main part of the execution of each thread will occur in a fully
* synchronous and deterministic manner, and only when commanded by the Zephyr
* kernel.
* But the creation of a thread will spawn a new pthread whose start
* is asynchronous to the rest, until synchronized in posix_wait_until_allowed()
* below.
* Similarly aborting and canceling threads execute a tail in a quite
* asynchronous manner.
*
* This implementation is meant to be portable in between POSIX systems.
* A table (threads_table) is used to abstract the native pthreads.
* An index in this table is used to identify threads in the IF to the kernel.
*
*/
#define POSIX_ARCH_DEBUG_PRINTS 0
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "posix_core.h"
#include "posix_soc_if.h"
#include "nano_internal.h"
#include "kernel_structs.h"
#include "ksched.h"
#define PREFIX "POSIX arch core: "
#define ERPREFIX PREFIX"error on "
#define NO_MEM_ERR PREFIX"Can't allocate memory\n"
#if POSIX_ARCH_DEBUG_PRINTS
#define PC_DEBUG(fmt, ...) posix_print_trace(PREFIX fmt, __VA_ARGS__)
#else
#define PC_DEBUG(...)
#endif
#define PC_ALLOC_CHUNK_SIZE 64
#define PC_REUSE_ABORTED_ENTRIES 0
/* tests/kernel/threads/scheduling/schedule_api fails when setting
* PC_REUSE_ABORTED_ENTRIES => don't set it by now
*/
static int threads_table_size;
struct threads_table_el {
enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
bool running; /* Is this the currently running thread */
pthread_t thread; /* Actual pthread_t as returned by native kernel */
int thead_cnt; /* For debugging: Unique, consecutive, thread number */
};
static struct threads_table_el *threads_table;
static int thread_create_count; /* For debugging. Thread creation counter */
/*
* Conditional variable to block/awake all threads during swaps()
* (we only need 1 mutex and 1 cond variable for all threads)
*/
static pthread_cond_t cond_threads = PTHREAD_COND_INITIALIZER;
/* Mutex for the conditional variable posix_core_cond_threads */
static pthread_mutex_t mtx_threads = PTHREAD_MUTEX_INITIALIZER;
/* Token which tells which process is allowed to run now */
static int currently_allowed_thread;
static bool terminate; /* Are we terminating the program == cleaning up */
static void posix_wait_until_allowed(int this_th_nbr);
static void *posix_thread_starter(void *arg);
static void posix_preexit_cleanup(void);
/**
 * Helper function, run by a thread that is being aborted:
 * marks its table entry ABORTED, releases the run mutex and exits the
 * underlying pthread (does not return)
 */
static void abort_tail(int this_th_nbr)
{
PC_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
threads_table[this_th_nbr].running = false;
threads_table[this_th_nbr].state = ABORTED;
posix_preexit_cleanup();
pthread_exit(NULL);
}
/**
 * Helper function to block this thread until it is allowed again
 * (somebody calls posix_let_run() with this thread number)
 *
 * Note that we go out of this function (the while loop below)
 * with the mutex locked by this particular thread.
 * In normal circumstances, the mutex is only unlocked internally in
 * pthread_cond_wait() while waiting for cond_threads to be signaled
 */
static void posix_wait_until_allowed(int this_th_nbr)
{
threads_table[this_th_nbr].running = false;
PC_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run (rel mut)\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
while (this_th_nbr != currently_allowed_thread) {
pthread_cond_wait(&cond_threads, &mtx_threads);
/* If this thread was marked for abort while blocked, exit it now.
 * (threads_table may already be freed during program termination)
 */
if (threads_table &&
(threads_table[this_th_nbr].state == ABORTING)) {
abort_tail(this_th_nbr);
}
}
threads_table[this_th_nbr].running = true;
PC_DEBUG("Thread [%i] %i: %s(): I'm allowed to run! (hav mut)\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
}
/**
 * Helper function to let the thread <next_allowed_th> run
 *
 * Hands the run token to <next_allowed_th> and broadcasts on the shared
 * condition variable so every blocked thread re-checks whether it is the
 * one allowed to run.
 *
 * Note: posix_let_run() can only be called with the mutex locked
 */
static void posix_let_run(int next_allowed_th)
{
PC_DEBUG("%s: We let thread [%i] %i run\n",
__func__,
threads_table[next_allowed_th].thead_cnt,
next_allowed_th);
currently_allowed_thread = next_allowed_th;
/*
 * We let all threads know one is able to run now (it may even be us
 * again if fancied)
 * Note that as we hold the mutex, they are going to be blocked until
 * we reach our own posix_wait_until_allowed() while loop
 */
if (pthread_cond_broadcast(&cond_threads)) {
/* Fixed: the error message previously named pthread_cond_signal(),
 * but the failing call is pthread_cond_broadcast()
 */
posix_print_error_and_exit(ERPREFIX"pthread_cond_broadcast()\n");
}
}
/**
 * Common tail executed by a thread right before it exits:
 * releases the run mutex and detaches the pthread
 */
static void posix_preexit_cleanup(void)
{
/*
 * Release the mutex so the next allowed thread can run
 */
if (pthread_mutex_unlock(&mtx_threads)) {
posix_print_error_and_exit(ERPREFIX"pthread_mutex_unlock()\n");
}
/* We detach ourselves so nobody needs to join to us */
pthread_detach(pthread_self());
}
/**
 * Let the ready thread run and block this thread until it is allowed again
 *
 * called from __swap() which does the picking from the kernel structures
 *
 * @param next_allowed_thread_nbr table index of the thread to run next
 * @param this_th_nbr table index of the calling (outgoing) thread
 */
void posix_swap(int next_allowed_thread_nbr, int this_th_nbr)
{
posix_let_run(next_allowed_thread_nbr);
/* If the outgoing thread was marked for abort, exit it instead of
 * blocking it
 */
if (threads_table[this_th_nbr].state == ABORTING) {
PC_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
threads_table[this_th_nbr].thead_cnt,
this_th_nbr,
__func__);
abort_tail(this_th_nbr);
} else {
posix_wait_until_allowed(this_th_nbr);
}
}
/**
 * Let the ready thread (main) run, and exit this thread (init)
 *
 * Called from _arch_switch_to_main_thread() which does the picking from the
 * kernel structures
 *
 * Note that we could have just done a swap(), but that would have left the
 * init thread lingering. Instead here we exit the init thread after enabling
 * the new one
 *
 * @param next_allowed_thread_nbr table index of the (main) thread to run
 */
void posix_main_thread_start(int next_allowed_thread_nbr)
{
posix_let_run(next_allowed_thread_nbr);
PC_DEBUG("%s: Init thread dying now (rel mut)\n",
__func__);
posix_preexit_cleanup();
pthread_exit(NULL);
}
/**
 * Handler called when any thread is cancelled or exits
 *
 * Installed via pthread_cleanup_push() in posix_thread_starter().
 * NOTE(review): 'arg' is only read when POSIX_ARCH_DEBUG_PRINTS is set —
 * otherwise it is unused here; confirm whether that triggers a warning
 */
static void posix_cleanup_handler(void *arg)
{
/*
 * If we are not terminating, this is just an aborted thread,
 * and the mutex was already released
 * Otherwise, release the mutex so other threads which may be
 * caught waiting for it could terminate
 */
if (!terminate) {
return;
}
#if POSIX_ARCH_DEBUG_PRINTS
posix_thread_status_t *ptr = (posix_thread_status_t *) arg;
PC_DEBUG("Thread %i: %s: Canceling (rel mut)\n",
ptr->thread_idx,
__func__);
#endif
if (pthread_mutex_unlock(&mtx_threads)) {
posix_print_error_and_exit(ERPREFIX"pthread_mutex_unlock()\n");
}
/* We detach ourselves so nobody needs to join to us */
pthread_detach(pthread_self());
}
/**
 * Helper function to start a Zephyr thread as a POSIX thread:
 * It will block the thread until a __swap() is called for it
 *
 * Spawned from posix_new_thread() below
 *
 * @param arg posix_thread_status_t* describing the Zephyr thread to run
 */
static void *posix_thread_starter(void *arg)
{
posix_thread_status_t *ptr = (posix_thread_status_t *) arg;
PC_DEBUG("Thread [%i] %i: %s: Starting\n",
threads_table[ptr->thread_idx].thead_cnt,
ptr->thread_idx,
__func__);
/*
 * We block until all other running threads reach the while loop
 * in posix_wait_until_allowed() and they release the mutex
 */
if (pthread_mutex_lock(&mtx_threads)) {
posix_print_error_and_exit(ERPREFIX"pthread_mutex_lock()\n");
}
/*
 * The program may have been finished before this thread ever got to run
 */
if (!threads_table) {
posix_cleanup_handler(arg);
pthread_exit(NULL);
}
/* From here on, posix_cleanup_handler() runs on cancellation/exit */
pthread_cleanup_push(posix_cleanup_handler, arg);
PC_DEBUG("Thread [%i] %i: %s: After start mutex (hav mut)\n",
threads_table[ptr->thread_idx].thead_cnt,
ptr->thread_idx,
__func__);
/*
 * The thread would try to execute immediately, so we block it
 * until allowed
 */
posix_wait_until_allowed(ptr->thread_idx);
posix_new_thread_pre_start();
/* Hand control to the Zephyr thread entry wrapper */
_thread_entry(ptr->entry_point, ptr->arg1, ptr->arg2, ptr->arg3);
/*
 * We only reach this point if the thread actually returns which should
 * not happen. But we handle it gracefully just in case
 */
posix_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
threads_table[ptr->thread_idx].thead_cnt,
ptr->thread_idx,
pthread_self());
threads_table[ptr->thread_idx].running = false;
threads_table[ptr->thread_idx].state = FAILED;
pthread_cleanup_pop(1);
return NULL;
}
/**
 * Return the index of the first reusable entry in the threads table,
 * growing the table by PC_ALLOC_CHUNK_SIZE entries when it is full
 */
static int ttable_get_empty_slot(void)
{
	int slot;

	for (slot = 0; slot < threads_table_size; slot++) {
		bool reusable = (threads_table[slot].state == NOTUSED)
			|| (PC_REUSE_ABORTED_ENTRIES
			    && (threads_table[slot].state == ABORTED));

		if (reusable) {
			return slot;
		}
	}

	/*
	 * No free entry found: grow the table by one chunk and hand out the
	 * first newly created slot
	 */
	slot = threads_table_size;
	threads_table = realloc(threads_table,
				(threads_table_size + PC_ALLOC_CHUNK_SIZE)
				* sizeof(struct threads_table_el));
	if (threads_table == NULL) {
		posix_print_error_and_exit(NO_MEM_ERR);
	}

	/* Zero-initialize the freshly allocated entries */
	memset(&threads_table[slot],
	       0,
	       PC_ALLOC_CHUNK_SIZE * sizeof(struct threads_table_el));

	threads_table_size += PC_ALLOC_CHUNK_SIZE;

	return slot;
}
/**
 * Called from _new_thread(),
 * Create a new POSIX thread for the new Zephyr thread.
 * _new_thread() picks from the kernel structures what it is that we need to
 * call with what parameters
 *
 * @param ptr thread status block; ptr->thread_idx is filled in here
 */
void posix_new_thread(posix_thread_status_t *ptr)
{
int t_slot;
t_slot = ttable_get_empty_slot();
threads_table[t_slot].state = USED;
threads_table[t_slot].running = false;
threads_table[t_slot].thead_cnt = thread_create_count++;
ptr->thread_idx = t_slot;
/* The new pthread starts in posix_thread_starter(), which blocks it
 * until the kernel swaps to it
 */
if (pthread_create(&threads_table[t_slot].thread,
NULL,
posix_thread_starter,
(void *)ptr)) {
posix_print_error_and_exit(ERPREFIX"pthread_create()\n");
}
PC_DEBUG("created thread [%i] %i [%lu]\n",
threads_table[t_slot].thead_cnt,
ptr->thread_idx,
threads_table[t_slot].thread);
}
/**
 * Called from _IntLibInit()
 * prepare whatever needs to be prepared to be able to start threads
 */
void posix_init_multithreading(void)
{
thread_create_count = 0;
/* -1: no thread holds the run token yet */
currently_allowed_thread = -1;
threads_table = calloc(PC_ALLOC_CHUNK_SIZE,
sizeof(struct threads_table_el));
if (threads_table == NULL) {
posix_print_error_and_exit(NO_MEM_ERR);
}
threads_table_size = PC_ALLOC_CHUNK_SIZE;
/* Take the run mutex: new threads will block in their starter until
 * allowed to run
 */
if (pthread_mutex_lock(&mtx_threads)) {
posix_print_error_and_exit(ERPREFIX"pthread_mutex_lock()\n");
}
}
/**
 * Free any allocated memory by the posix core and clean up.
 * Note that this function cannot be called from a SW thread
 * (the CPU is assumed halted. Otherwise we will cancel ourselves)
 *
 * This function cannot guarantee the threads will be cancelled before the HW
 * thread exits. The only way to do that, would be to wait for each of them in
 * a join (without detaching them), but that could lead to locks in some
 * convoluted cases. As a call to this function can come from an ASSERT or other
 * error termination, we better do not assume things are working fine.
 * => we prefer the supposed memory leak report from valgrind, and ensure we
 * will not hang
 *
 */
void posix_core_clean_up(void)
{
if (!threads_table) {
return;
}
/* Signal all cleanup handlers that this is program termination */
terminate = true;
for (int i = 0; i < threads_table_size; i++) {
if (threads_table[i].state != USED) {
continue;
}
/* Best effort: cancellation failure is only warned about, see above */
if (pthread_cancel(threads_table[i].thread)) {
posix_print_warning(
PREFIX"cleanup: could not stop thread %i\n",
i);
}
}
free(threads_table);
threads_table = NULL;
}
/**
 * Mark a not-currently-scheduled thread as ABORTING; it will actually exit
 * the next time it wakes up in posix_wait_until_allowed()/posix_swap()
 *
 * @param thread_idx threads_table index of the thread to abort
 */
void posix_abort_thread(int thread_idx)
{
if (threads_table[thread_idx].state != USED) {
/* The thread may have been already aborted before */
return;
}
PC_DEBUG("Aborting not scheduled thread [%i] %i\n",
threads_table[thread_idx].thead_cnt,
thread_idx);
threads_table[thread_idx].state = ABORTING;
/*
 * Note: the native thread will linger in RAM until it catches the
 * mutex or awakes on the condition.
 * Note that even if we would pthread_cancel() the thread here, that
 * would be the case, but with a pthread_cancel() the mutex state would
 * be uncontrolled
 */
}
#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
extern void _k_thread_single_abort(struct k_thread *thread);
/**
 * Arch-specific k_thread_abort(): remove the thread from the kernel and mark
 * its native pthread for termination. When aborting the currently running
 * thread this swaps away and never returns.
 *
 * @param thread the Zephyr thread to abort
 */
void _impl_k_thread_abort(k_tid_t thread)
{
unsigned int key;
int thread_idx;
posix_thread_status_t *tstatus =
(posix_thread_status_t *)
thread->callee_saved.thread_status;
thread_idx = tstatus->thread_idx;
key = irq_lock();
__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
"essential thread aborted");
_k_thread_single_abort(thread);
_thread_monitor_exit(thread);
if (_current == thread) {
/* Aborting ourselves: mark the entry ABORTING and swap away; the
 * pthread exits inside posix_swap()/abort_tail()
 */
if (tstatus->aborted == 0) {
tstatus->aborted = 1;
} else {
posix_print_warning(
PREFIX"The kernel is trying to abort and swap "
"out of an already aborted thread %i. This "
"should NOT have happened\n",
thread_idx);
}
threads_table[thread_idx].state = ABORTING;
PC_DEBUG("Thread [%i] %i: %s Marked myself "
"as aborting\n",
threads_table[thread_idx].thead_cnt,
thread_idx,
__func__);
_Swap(key);
CODE_UNREACHABLE;
}
/* Aborting another thread: mark it; it terminates when next woken */
if (tstatus->aborted == 0) {
PC_DEBUG("%s aborting now [%i] %i\n",
__func__,
threads_table[thread_idx].thead_cnt,
thread_idx);
tstatus->aborted = 1;
posix_abort_thread(thread_idx);
} else {
PC_DEBUG("%s ignoring re_abort of [%i] "
"%i\n",
__func__,
threads_table[thread_idx].thead_cnt,
thread_idx);
}
/* The abort handler might have altered the ready queue. */
_reschedule_threads(key);
}
#endif

104
arch/posix/core/swap.c Normal file
View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Kernel swapper code for POSIX
*
* This module implements the __swap() routine for the POSIX architecture.
*
*/
#include "kernel.h"
#include <kernel_structs.h>
#include "posix_core.h"
#include "irq.h"
/**
 *
 * @brief Initiate a cooperative context switch
 *
 * The __swap() routine is invoked by various kernel services to effect
 * a cooperative context switch. Prior to invoking __swap(), the
 * caller disables interrupts (via irq_lock) and the return 'key'
 * is passed as a parameter to __swap().
 *
 *
 * @param key interrupt lock key from irq_lock(), restored on return
 *
 * @return -EAGAIN, or a return value set by a call to
 * _set_thread_return_value()
 *
 */
unsigned int __swap(unsigned int key)
{
/*
 * struct k_thread * _kernel.current is the currently running thread
 * struct k_thread * _kernel.ready_q.cache contains the next thread to run
 * (cannot be NULL)
 *
 * Here a "real" arch would save all processor registers, stack pointer and so
 * forth.
 * But we do not need to do so because we use posix threads => those are all
 * nicely kept by the native OS kernel
 */
_kernel.current->callee_saved.key = key;
_kernel.current->callee_saved.retval = -EAGAIN;
/* retval may be modified with a call to _set_thread_return_value() */
#if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
_sys_k_event_logger_context_switch();
#endif
posix_thread_status_t *ready_thread_ptr =
(posix_thread_status_t *)
_kernel.ready_q.cache->callee_saved.thread_status;
posix_thread_status_t *this_thread_ptr =
(posix_thread_status_t *)
_kernel.current->callee_saved.thread_status;
_kernel.current = _kernel.ready_q.cache;
/*
 * Here a "real" arch would load all processor registers for the thread
 * to run. In this arch case, we just block this thread until allowed to
 * run later, and signal to whomever is allowed to run to continue.
 */
posix_swap(ready_thread_ptr->thread_idx,
this_thread_ptr->thread_idx);
/* When we continue, _kernel->current points back to this thread */
irq_unlock(_kernel.current->callee_saved.key);
return _kernel.current->callee_saved.retval;
}
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
 * This is just a version of __swap() in which we do not save anything about the
 * current thread.
 *
 * Note that we will never come back to this thread:
 * posix_main_thread_start() does never return
 *
 * NOTE(review): main_thread, main_stack, main_stack_size and _main are not
 * used in this body — confirm whether ARG_UNUSED() is wanted to silence
 * compiler warnings
 */
void _arch_switch_to_main_thread(struct k_thread *main_thread,
k_thread_stack_t *main_stack,
size_t main_stack_size, k_thread_entry_t _main)
{
posix_thread_status_t *ready_thread_ptr =
(posix_thread_status_t *)
_kernel.ready_q.cache->callee_saved.thread_status;
_kernel.current = _kernel.ready_q.cache;
posix_main_thread_start(ready_thread_ptr->thread_idx);
}
#endif

89
arch/posix/core/thread.c Normal file
View file

@ -0,0 +1,89 @@
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Thread support primitives
*
* This module provides core thread related primitives for the POSIX
* architecture
*/
#ifdef CONFIG_INIT_STACKS
#include <string.h>
#endif /* CONFIG_INIT_STACKS */
#include <toolchain.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include "posix_core.h"
#include "posix_soc_if.h"
/**
 * @brief Create a new kernel execution thread
 *
 * Initializes the k_thread object and sets up initial stack frame.
 *
 * @param thread pointer to thread struct memory, including any space needed
 * for extra coprocessor context
 * @param stack the pointer to aligned stack memory
 * @param stack_size the stack size in bytes
 * @param thread_func thread entry point routine
 * @param arg1 first param to entry point
 * @param arg2 second param to entry point
 * @param arg3 third param to entry point
 * @param priority thread priority
 * @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
 *
 * Note that in this arch we cheat quite a bit: we use as stack a normal
 * pthreads stack and therefore we ignore the stack size
 *
 */
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t thread_func,
void *arg1, void *arg2, void *arg3,
int priority, unsigned int options)
{
char *stack_memory = K_THREAD_STACK_BUFFER(stack);
_ASSERT_VALID_PRIO(priority, thread_func);
posix_thread_status_t *thread_status;
_new_thread_init(thread, stack_memory, stack_size, priority, options);
/* We store it in the same place where normal archs store the
 * "initial stack frame"
 */
thread_status = (posix_thread_status_t *)
STACK_ROUND_DOWN(stack_memory + stack_size
- sizeof(*thread_status));
/* _thread_entry() arguments */
thread_status->entry_point = thread_func;
thread_status->arg1 = arg1;
thread_status->arg2 = arg2;
thread_status->arg3 = arg3;
#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
thread_status->aborted = 0;
#endif
/* Pointer stored as u32_t: valid because this arch builds as a 32-bit
 * binary (-m32, see arch/posix/CMakeLists.txt)
 */
thread->callee_saved.thread_status = (u32_t)thread_status;
/* Spawn the native pthread backing this Zephyr thread */
posix_new_thread(thread_status);
thread_monitor_init(thread);
}
/**
 * Run by a new thread right before jumping into its Zephyr entry point:
 * fully re-enables interrupts for it
 */
void posix_new_thread_pre_start(void)
{
posix_irq_full_unlock();
}

View file

@ -0,0 +1,24 @@
/* Inline assembler kernel functions and macros */
/*
* Copyright (c) 2015, Wind River Systems, Inc.
* Copyright (c) 2017, Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _ASM_INLINE_H
#define _ASM_INLINE_H
#if !defined(CONFIG_ARCH_POSIX)
#error The arch/posix/include/asm_inline.h is only for the POSIX architecture
#endif
#if defined(__GNUC__)
#include <asm_inline_gcc.h> /* The empty one.. */
#include <arch/posix/asm_inline_gcc.h>
#else
#include <asm_inline_other.h>
#endif /* __GNUC__ */
#endif /* _ASM_INLINE_H */

View file

@ -0,0 +1 @@
/* EMPTY ON PURPOSE. Why do the intel and ARM arch have 2 versions of it? */

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2013-2016 Wind River Systems, Inc.
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions (POSIX)
*
*/
#ifndef _kernel_arch_data__h_
#define _kernel_arch_data__h_
#ifdef __cplusplus
extern "C" {
#endif
#include <nano_internal.h>
/* stacks */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#ifndef _ASMLANGUAGE
struct _kernel_arch {
/* empty */
};
typedef struct _kernel_arch _kernel_arch_t;
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _kernel_arch_data__h_ */

View file

@ -0,0 +1,70 @@
/*
* Copyright (c) 2016 Wind River Systems, Inc.
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/* This file is only meant to be included by kernel_structs.h */
#ifndef _kernel_arch_func__h_
#define _kernel_arch_func__h_
#include "kernel.h"
#include <toolchain/common.h>
#include "posix_core.h"
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
#if defined(CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN)
void _arch_switch_to_main_thread(struct k_thread *main_thread,
k_thread_stack_t *main_stack,
size_t main_stack_size, k_thread_entry_t _main);
#endif
/**
 *
 * @brief Performs architecture-specific initialization
 *
 * This routine performs architecture-specific initialization of the kernel.
 * Trivial stuff is done inline; more complex initialization is done via
 * function calls.
 *
 * The POSIX arch needs no initialization here.
 *
 * @return N/A
 */
static inline void kernel_arch_init(void)
{
	/* Nothing to be done */
}
/**
 * @brief Set the value @a thread will see returned from _Swap()
 *
 * Stored in the thread's callee saved area (see kernel_arch_thread.h,
 * _callee_saved.retval).
 */
static ALWAYS_INLINE void
_set_thread_return_value(struct k_thread *thread, unsigned int value)
{
	thread->callee_saved.retval = value;
}
/*
 * _IntLibInit() is called from the non-arch specific function,
 * prepare_multithreading().
 *
 * For the POSIX arch it initializes the pthread-based threading
 * emulation (see posix_core.h).
 */
static inline void _IntLibInit(void)
{
	posix_init_multithreading();
}
#ifdef __cplusplus
}
#endif
#define _is_in_isr() (_kernel.nested != 0)
#endif /* _ASMLANGUAGE */
#endif /* _kernel_arch_func__h_ */

View file

@ -0,0 +1,57 @@
/*
 * Copyright (c) 2017 Intel Corporation
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Per-arch thread definition
 *
 * This file contains definitions for
 *
 *  struct _thread_arch
 *  struct _callee_saved
 *  struct _caller_saved
 *
 * necessary to instantiate instances of struct k_thread.
 */

#ifndef _kernel_arch_thread__h_
#define _kernel_arch_thread__h_

#ifndef _ASMLANGUAGE
#include <zephyr/types.h>

/* The POSIX arch keeps no caller saved registers */
struct _caller_saved {
	/*
	 * Nothing here
	 */
};

struct _callee_saved {
	/* IRQ status before irq_lock() and call to _Swap() */
	u32_t key;

	/* Return value of _Swap() */
	u32_t retval;

	/*
	 * Thread status pointer (a posix_thread_status_t *, stored as u32_t)
	 * (We need to compile as 32bit binaries in POSIX, so the pointer
	 * fits)
	 */
	u32_t thread_status;
};

struct _thread_arch {
	/* nothing for now */
};

typedef struct _thread_arch _thread_arch_t;

#endif /* _ASMLANGUAGE */

#endif /* _kernel_arch_thread__h_ */

View file

@ -0,0 +1,39 @@
/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel event logger support for the POSIX arch
 */

#ifndef __KERNEL_EVENT_LOGGER_ARCH_H__
#define __KERNEL_EVENT_LOGGER_ARCH_H__

#include "posix_soc_if.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Get the identification of the current interrupt.
 *
 * This routine obtains the key of the interrupt that is currently processed
 * if it is called from an ISR context.
 *
 * @return The key of the interrupt that is currently being processed.
 */
static inline int _sys_current_irq_key_get(void)
{
	/* Provided by the SOC or board (see posix_soc_if.h) */
	return posix_get_current_irq();
}

#ifdef __cplusplus
}
#endif

#endif /* __KERNEL_EVENT_LOGGER_ARCH_H__ */

View file

@ -0,0 +1,35 @@
/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _offsets_short_arch__h_
#define _offsets_short_arch__h_

#include <offsets.h>

/* NOTE(review): these macros look carried over from another arch. The POSIX
 * arch's struct _kernel_arch is empty and its _callee_saved/_thread_arch
 * structs define none of the fields referenced below (isf, excNestCount,
 * esp, coopFloatReg, preempFloatReg) — confirm whether any of these macros
 * are used for this arch at all.
 */

/* kernel */

#define _kernel_offset_to_isf \
	(___kernel_t_arch_OFFSET + ___kernel_arch_t_isf_OFFSET)

/* end - kernel */

/* threads */

#define _thread_offset_to_excNestCount \
	(___thread_t_arch_OFFSET + ___thread_arch_t_excNestCount_OFFSET)

#define _thread_offset_to_esp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_esp_OFFSET)

#define _thread_offset_to_coopFloatReg \
	(___thread_t_arch_OFFSET + ___thread_arch_t_coopFloatReg_OFFSET)

#define _thread_offset_to_preempFloatReg \
	(___thread_t_arch_OFFSET + ___thread_arch_t_preempFloatReg_OFFSET)

/* end - threads */

#endif /* _offsets_short_arch__h_ */

View file

@ -0,0 +1,60 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Header to be able to compile the Zephyr kernel on top of a POSIX OS
 */

#ifndef _POSIX_CHEATS_H
#define _POSIX_CHEATS_H

#ifdef CONFIG_ARCH_POSIX

/* Rename the application's main(): when running on top of a host OS the
 * process entry point is not the Zephyr application's
 * (NOTE(review): confirm where zephyr_app_main() is picked up by the build)
 */
#ifndef main
#define main(...) zephyr_app_main(__VA_ARGS__)
#endif

/* For the include/posix/pthreads.h provided with Zephyr,
 * in case somebody would use it, we rename all symbols here adding
 * some prefix, and we ensure this header is included
 */

/* Types: */
#define timespec zap_timespec
#define pthread_mutex_t zap_pthread_mutex_t
#define pthread_mutexattr_t zap_pthread_mutexattr_t
#define pthread_cond_t zap_pthread_cond_t
#define pthread_condattr_t zap_pthread_condattr_t
#define pthread_barrier_t zap_pthread_barrier_t
#define pthread_barrierattr_t zap_pthread_barrierattr_t

/* Condition variables: */
#define pthread_cond_init(...) zap_pthread_cond_init(__VA_ARGS__)
#define pthread_cond_destroy(...) zap_pthread_cond_destroy(__VA_ARGS__)
#define pthread_cond_signal(...) zap_pthread_cond_signal(__VA_ARGS__)
#define pthread_cond_broadcast(...) zap_pthread_cond_broadcast(__VA_ARGS__)
#define pthread_cond_wait(...) zap_pthread_cond_wait(__VA_ARGS__)
#define pthread_cond_timedwait(...) zap_pthread_cond_timedwait(__VA_ARGS__)
#define pthread_condattr_init(...) zap_pthread_condattr_init(__VA_ARGS__)
#define pthread_condattr_destroy(...) zap_pthread_condattr_destroy(__VA_ARGS__)

/* Mutexes: */
#define pthread_mutex_init(...) zap_pthread_mutex_init(__VA_ARGS__)
#define pthread_mutex_destroy(...) zap_pthread_mutex_destroy(__VA_ARGS__)
#define pthread_mutex_lock(...) zap_pthread_mutex_lock(__VA_ARGS__)
#define pthread_mutex_timedlock(...) zap_pthread_mutex_timedlock(__VA_ARGS__)
#define pthread_mutex_trylock(...) zap_pthread_mutex_trylock(__VA_ARGS__)
#define pthread_mutex_unlock(...) zap_pthread_mutex_unlock(__VA_ARGS__)
#define pthread_mutexattr_init(...) zap_pthread_mutexattr_init(__VA_ARGS__)
#define pthread_mutexattr_destroy(...) \
	zap_pthread_mutexattr_destroy(__VA_ARGS__)

/* Barriers: */
#define pthread_barrier_wait(...) zap_pthread_barrier_wait(__VA_ARGS__)
#define pthread_barrier_init(...) zap_pthread_barrier_init(__VA_ARGS__)
#define pthread_barrier_destroy(...) zap_pthread_barrier_destroy(__VA_ARGS__)
#define pthread_barrierattr_init(...) zap_pthread_barrierattr_init(__VA_ARGS__)
#define pthread_barrierattr_destroy(...) \
	zap_pthread_barrierattr_destroy(__VA_ARGS__)

#endif /* CONFIG_ARCH_POSIX */

#endif

View file

@ -0,0 +1,44 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _POSIX_CORE_H
#define _POSIX_CORE_H

#include "kernel.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Per-thread bookkeeping kept by the POSIX arch core. A pointer to it is
 * stored (cast to u32_t) in the thread's callee_saved.thread_status
 * (see thread.c)
 */
typedef struct {
	k_thread_entry_t entry_point; /* Thread entry function */
	void *arg1; /* Arguments passed to the entry function */
	void *arg2;
	void *arg3;

	int thread_idx; /* Index of this thread in the arch core's tables */

#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
	/* The kernel may indicate that a thread has been aborted several */
	/* times */
	int aborted;
#endif
} posix_thread_status_t;

/* Implemented by the POSIX arch core (not visible in this patch chunk);
 * summaries below come from the callers in this patch
 */
/* Create the underlying pthread for a newly created Zephyr thread */
void posix_new_thread(posix_thread_status_t *ptr);
/* Transfer execution to the indicated thread — presumably a context
 * switch primitive; confirm against the arch core implementation
 */
void posix_swap(int next_allowed_thread_nbr, int this_thread_nbr);
void posix_main_thread_start(int next_allowed_thread_nbr);
/* Initialize the threading emulation (called from _IntLibInit()) */
void posix_init_multithreading(void);
/* Free resources allocated by the arch core (called during SOC cleanup) */
void posix_core_clean_up(void);

void posix_new_thread_pre_start(void); /* defined in thread.c */

#ifdef __cplusplus
}
#endif

#endif /* _POSIX_CORE_H */

View file

@ -0,0 +1,46 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _POSIX_CORE_SOC_PROVIDED_IF_H
#define _POSIX_CORE_SOC_PROVIDED_IF_H

/*
 * This file lists the functions the POSIX architecture core expects the
 * SOC or board will provide
 *
 * All functions listed here must be provided by the implementation of the SOC
 * or all its boards
 */

#ifdef __cplusplus
extern "C" {
#endif

/* Diagnostics output; posix_print_error_and_exit() also terminates the
 * program (it is used as the last call before __builtin_unreachable() in
 * CODE_UNREACHABLE)
 */
void posix_print_error_and_exit(const char *format, ...);
void posix_print_warning(const char *format, ...);
void posix_print_trace(const char *format, ...);

/* Halt the emulated CPU until the HW models awake it again */
void posix_halt_cpu(void);
/* k_cpu_atomic_idle() implementation: halt with @a imask restored after */
void posix_atomic_halt_cpu(unsigned int imask);

#include "soc_irq.h" /* Must exist and define _ARCH_IRQ/ISR_* macros */

/* IRQ lock/unlock & enable/disable primitives used by the arch layer */
unsigned int _arch_irq_lock(void);
void _arch_irq_unlock(unsigned int key);
void _arch_irq_enable(unsigned int irq);
void _arch_irq_disable(unsigned int irq);
int _arch_irq_is_enabled(unsigned int irq);
unsigned int posix_irq_lock(void);
void posix_irq_unlock(unsigned int key);
void posix_irq_full_unlock(void);
int posix_get_current_irq(void);

/* irq_offload() from irq_offload.h must also be defined by the SOC or board */

#ifdef __cplusplus
}
#endif

#endif /* _POSIX_CORE_SOC_PROVIDED_IF_H */

View file

@ -0,0 +1,6 @@
# Descend into the SOC family directory when the selected SOC belongs to a
# family; otherwise descend straight into the single SOC's own directory.
if(SOC_FAMILY)
  add_subdirectory(${SOC_FAMILY})
else()
  add_subdirectory(${SOC_NAME})
endif()

View file

@ -0,0 +1,5 @@
# Build the inf_clock SOC support as a Zephyr library.
#
# _POSIX_CHEATS_H is the include guard of include/arch/posix/posix_cheats.h;
# pre-defining it keeps that header's renaming macros out of this library,
# which is compiled against the host's own pthread API.
zephyr_library()

zephyr_library_compile_definitions(_POSIX_CHEATS_H)

zephyr_library_sources(
  soc.c
)

View file

@ -0,0 +1,12 @@
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# When the POSIX SOC is selected, default the SOC name to the only SOC
# provided here: inf_clock
if SOC_POSIX

config SOC
	default inf_clock

endif

View file

@ -0,0 +1,13 @@
config SOC_POSIX
bool "Native POSIX port"
help
SOC for the POSIX arch. It emulates a CPU running at an infinitely high
clock. That means the CPU will always run in zero time until completion after
each wake reason (e.g. interrupts), before going back to idle. Note that an
infinite loop in the code which does not sleep the CPU will cause the process
to appear to "hang", as simulated time does not advance while the CPU does not
sleep. Therefore do not use busy waits while waiting for something to happen
(if needed use k_busy_wait()).
Note that the interrupt handling is provided by the board.

View file

@ -0,0 +1,12 @@
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief Linker script for the POSIX ARCH & INF_CLOCK SOC
*/
#include <arch/posix/linker.ld>

View file

@ -0,0 +1,35 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _POSIX_CORE_BOARD_PROVIDED_IF_H
#define _POSIX_CORE_BOARD_PROVIDED_IF_H

#include "zephyr/types.h"

/*
 * This file lists the functions the posix "inf_clock" soc
 * expects the board to provide
 *
 * All functions listed here must be provided by the implementation of the board
 *
 * See soc_irq.h for more
 */

#ifdef __cplusplus
extern "C" {
#endif

/* Check which interrupt was raised and run its handler; called by the SOC
 * each time the CPU is awakened (see posix_halt_cpu() in soc.c)
 */
void posix_irq_handler(void);

/* Terminate the program with the given exit code */
void main_clean_up(int exit_code);

#if defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
void k_busy_wait(u32_t usec_to_wait);
#endif

#ifdef __cplusplus
}
#endif

#endif /* _POSIX_CORE_BOARD_PROVIDED_IF_H */

View file

@ -0,0 +1,24 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _POSIX_POSIX_SOC_INF_CLOCK_H
#define _POSIX_POSIX_SOC_INF_CLOCK_H

#include "posix_soc_if.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Called by the HW models when raising an interrupt: awakes the CPU and
 * blocks until the CPU is halted again (see soc.c)
 */
void posix_interrupt_raised(void);

/* Called by the HW models to "boot" the CPU: spawn the Zephyr init thread
 * and wait for it to go to idle
 */
void posix_boot_cpu(void);

/* Returns non-0 while the CPU is running, 0 while halted */
int posix_is_cpu_running(void);

#ifdef __cplusplus
}
#endif

#endif /* _POSIX_POSIX_SOC_INF_CLOCK_H */

View file

@ -0,0 +1,277 @@
/*
* Copyright (c) 2017 Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * For all purposes, Zephyr threads see a CPU running at an infinitely high
 * clock.
 *
 * Therefore, the code will always run until completion after each interrupt,
 * after which k_cpu_idle() will be called releasing the execution back to the
 * HW models.
 *
 * The HW models raising an interrupt will "awake the CPU" by calling
 * posix_interrupt_raised() which will transfer control to the IRQ handler,
 * which will run inside SW/Zephyr context. After which a __swap() to whatever
 * Zephyr thread may follow.
 * Again, once Zephyr is done, control is given back to the HW models.
*
*
* The Zephyr OS+APP code and the HW models are gated by a mutex +
* condition as there is no reason to let the zephyr threads run while the
* HW models run or vice versa
*
*/
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>
#include "posix_soc_if.h"
#include "posix_soc.h"
#include "posix_board_if.h"
#include "posix_core.h"
#include "nano_internal.h"
#define POSIX_ARCH_SOC_DEBUG_PRINTS 0
#define PREFIX "POSIX SOC: "
#define ERPREFIX PREFIX"error on "
#if POSIX_ARCH_SOC_DEBUG_PRINTS
#define PS_DEBUG(fmt, ...) posix_print_trace(PREFIX fmt, __VA_ARGS__)
#else
#define PS_DEBUG(...)
#endif
/* Conditional variable to know if the CPU is running or halted/idling */
static pthread_cond_t cond_cpu = PTHREAD_COND_INITIALIZER;
/* Mutex for the conditional variable posix_soc_cond_cpu */
static pthread_mutex_t mtx_cpu = PTHREAD_MUTEX_INITIALIZER;
/* Variable which tells if the CPU is halted (1) or not (0) */
static bool cpu_halted = true;
static bool soc_terminate; /* Is the program being closed */
/* Return non-0 while the (emulated) CPU is running, 0 while it is halted */
int posix_is_cpu_running(void)
{
	return !cpu_halted;
}
/**
 * Helper function which changes the status of the CPU (halted or running)
 * and waits until somebody else changes it to the opposite
 *
 * Both HW and SW threads will use this function to transfer control to the
 * other side.
 *
 * This is how the idle thread halts the CPU and gets halted until the HW models
 * raise a new interrupt; and how the HW models awake the CPU, and wait for it
 * to complete and go to idle.
 *
 * @param halted  New CPU state: true = halt it, false = set it running
 */
static void posix_change_cpu_state_and_wait(bool halted)
{
	if (pthread_mutex_lock(&mtx_cpu)) {
		posix_print_error_and_exit(ERPREFIX"pthread_mutex_lock()\n");
	}

	PS_DEBUG("Going to halted = %d\n", halted);

	cpu_halted = halted;

	/* We let the other side know the CPU has changed state */
	if (pthread_cond_broadcast(&cond_cpu)) {
		posix_print_error_and_exit(
			ERPREFIX"pthread_cond_broadcast()\n");
	}

	/* We wait until the CPU state has been changed. Either:
	 * we just awoke it, and therefore wait until the CPU has run until
	 * completion before continuing (before letting the HW models do
	 * anything else)
	 *  or
	 * we are just hanging it, and therefore wait until the HW models awake
	 * it again
	 */
	while (cpu_halted == halted) {
		/* pthread_cond_wait() atomically releases the mutex while
		 * waiting and reacquires it before returning. Its return
		 * value is checked for consistency with every other pthread
		 * call in this file (an error would otherwise degrade into a
		 * silent busy loop)
		 */
		if (pthread_cond_wait(&cond_cpu, &mtx_cpu)) {
			posix_print_error_and_exit(
				ERPREFIX"pthread_cond_wait()\n");
		}
	}

	PS_DEBUG("Awaken after halted = %d\n", halted);

	if (pthread_mutex_unlock(&mtx_cpu)) {
		posix_print_error_and_exit(ERPREFIX"pthread_mutex_unlock()\n");
	}
}
/**
 * HW models shall call this function to "awake the CPU"
 * when they are raising an interrupt
 */
void posix_interrupt_raised(void)
{
	/* We change the CPU to running state (we awake it), and block this
	 * thread until the CPU is halted again
	 */
	posix_change_cpu_state_and_wait(false);

	/*
	 * If while the SW was running it was decided to terminate the execution
	 * we stop immediately.
	 */
	if (soc_terminate) {
		main_clean_up(0);
	}
}
/**
 * Called from k_cpu_idle(), the idle loop will call this function to set the
 * CPU to "sleep".
 * Interrupts should be unlocked before calling
 */
void posix_halt_cpu(void)
{
	/* We change the CPU to halted state, and block this thread until it is
	 * set running again
	 */
	posix_change_cpu_state_and_wait(true);

	/* We are awaken when some interrupt comes => let the "irq handler"
	 * (provided by the board, see posix_board_if.h)
	 * check what interrupt was raised and call the appropriate irq handler
	 * That may trigger a __swap() to another Zephyr thread
	 */
	posix_irq_handler();

	/*
	 * When the interrupt handler is back we go back to the idle loop (which
	 * will just call us back)
	 * Note that when we are coming back from the irq_handler the Zephyr
	 * kernel has swapped back to the idle thread
	 */
}
/**
 * Implementation of k_cpu_atomic_idle() for this SOC:
 * fully unlock interrupts, halt until an interrupt is raised and handled,
 * then restore the interrupt lock state the caller had
 */
void posix_atomic_halt_cpu(unsigned int imask)
{
	posix_irq_full_unlock();
	posix_halt_cpu();
	posix_irq_unlock(imask);
}
/**
 * Just a wrapper function to call Zephyr's _Cstart()
 * called from posix_boot_cpu()
 */
static void *zephyr_wrapper(void *a)
{
	/* Ensure posix_boot_cpu has reached the cond loop:
	 * it holds mtx_cpu until it starts waiting, so locking it here
	 * synchronizes with that point
	 */
	if (pthread_mutex_lock(&mtx_cpu)) {
		posix_print_error_and_exit(ERPREFIX"pthread_mutex_lock()\n");
	}
	if (pthread_mutex_unlock(&mtx_cpu)) {
		posix_print_error_and_exit(ERPREFIX"pthread_mutex_unlock()\n");
	}

#if (POSIX_ARCH_SOC_DEBUG_PRINTS)
	pthread_t zephyr_thread = pthread_self();

	PS_DEBUG("Zephyr init started (%lu)\n",
		zephyr_thread);
#endif

	/* Start Zephyr: */
	_Cstart();
	CODE_UNREACHABLE;

	return NULL;
}
/**
 * The HW models will call this function to "boot" the CPU
 * == spawn the Zephyr init thread, which will then spawn
 * anything it wants, and run until the CPU is set back to idle again
 */
void posix_boot_cpu(void)
{
	if (pthread_mutex_lock(&mtx_cpu)) {
		posix_print_error_and_exit(ERPREFIX"pthread_mutex_lock()\n");
	}

	cpu_halted = false;

	pthread_t zephyr_thread;

	/* Create a thread for Zephyr init: */
	if (pthread_create(&zephyr_thread, NULL, zephyr_wrapper, NULL)) {
		posix_print_error_and_exit(ERPREFIX"pthread_create\n");
	}

	/* And we wait until Zephyr has run til completion (has gone to idle) */
	while (cpu_halted == false) {
		/* Check the return value for consistency with the other
		 * pthread calls in this file, which all abort on error
		 */
		if (pthread_cond_wait(&cond_cpu, &mtx_cpu)) {
			posix_print_error_and_exit(
				ERPREFIX"pthread_cond_wait()\n");
		}
	}
	if (pthread_mutex_unlock(&mtx_cpu)) {
		posix_print_error_and_exit(ERPREFIX"pthread_mutex_unlock()\n");
	}

	/* If the SW requested termination while it ran, honor it now */
	if (soc_terminate) {
		main_clean_up(0);
	}
}
/**
 * Clean up all memory allocated by the SOC and POSIX core
 *
 * This function can be called from both HW and SW threads
 */
void posix_soc_clean_up(void)
{
	/*
	 * If we are being called from a HW thread we can cleanup
	 *
	 * Otherwise (!cpu_halted) we give back control to the HW thread and
	 * tell it to terminate ASAP
	 */
	if (cpu_halted) {
		posix_core_clean_up();
	} else if (soc_terminate == false) {
		soc_terminate = true;

		/* Pretend the CPU halted so the HW thread wakes up from
		 * posix_change_cpu_state_and_wait(); it will then see
		 * soc_terminate and call main_clean_up()
		 */
		if (pthread_mutex_lock(&mtx_cpu)) {
			posix_print_error_and_exit(
				ERPREFIX"pthread_mutex_lock()\n");
		}

		cpu_halted = true;

		if (pthread_cond_broadcast(&cond_cpu)) {
			posix_print_error_and_exit(
				ERPREFIX"pthread_cond_broadcast()\n");
		}
		if (pthread_mutex_unlock(&mtx_cpu)) {
			posix_print_error_and_exit(
				ERPREFIX"pthread_mutex_unlock()\n");
		}

		while (1) {
			sleep(1);
			/* This SW thread will wait until being cancelled from
			 * the HW thread. sleep() is a cancellation point, so it
			 * won't really wait 1 second
			 */
		}
	}
}

View file

@ -0,0 +1,23 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _POSIX_SOC_INF_CLOCK_SOC_H
#define _POSIX_SOC_INF_CLOCK_SOC_H

#include "board_soc.h"
#include "posix_soc.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Clean up all memory allocated by the SOC and POSIX core.
 * (The declaration was misspelled "poisix_soc_clean_up", which never
 * matched the definition in soc.c)
 */
void posix_soc_clean_up(void);

#ifdef __cplusplus
}
#endif

#endif /* _POSIX_SOC_INF_CLOCK_SOC_H */

View file

@ -0,0 +1,25 @@
/*
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _SOC_IRQ_H
#define _SOC_IRQ_H

/* The board must provide this header with the IRQ/ISR macros */
#include "board_irq.h"

/*
 * This SOC relies on the boards providing all the IRQ support
 */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __cplusplus
}
#endif

#endif /* _SOC_IRQ_H */

View file

@ -0,0 +1,31 @@
# Configures CMake for using GCC, this script is re-used by several
# GCC-based toolchains

set(CMAKE_C_COMPILER gcc     CACHE INTERNAL " " FORCE)
set(CMAKE_OBJCOPY    objcopy CACHE INTERNAL " " FORCE)
set(CMAKE_OBJDUMP    objdump CACHE INTERNAL " " FORCE)
#set(CMAKE_LINKER    ld      CACHE INTERNAL " " FORCE) # Not in use yet
set(CMAKE_AR         ar      CACHE INTERNAL " " FORCE)
# Fixed: this was misspelled CMAKE_RANLILB, leaving CMAKE_RANLIB unset
set(CMAKE_RANLIB     ranlib  CACHE INTERNAL " " FORCE)
set(CMAKE_READELF    readelf CACHE INTERNAL " " FORCE)
set(CMAKE_GDB        gdb     CACHE INTERNAL " " FORCE)

# Build 32-bit binaries (the arch code stores pointers in u32_t fields)
set(CMAKE_C_FLAGS             -m32 CACHE INTERNAL " " FORCE)
set(CMAKE_CXX_FLAGS           -m32 CACHE INTERNAL " " FORCE)
set(CMAKE_SHARED_LINKER_FLAGS -m32 CACHE INTERNAL " " FORCE)

#assert_exists(CMAKE_READELF)

if(CONFIG_CPLUSPLUS)
  set(cplusplus_compiler g++)
else()
  if(EXISTS g++)
    # NOTE(review): if(EXISTS) expects a full path, so this branch is
    # effectively never taken; find_program() would be the usual way to
    # probe for g++ — confirm intent before changing behavior.
    set(cplusplus_compiler g++)
  else()
    # When the toolchain doesn't support C++, and we aren't building
    # with C++ support just set it to something so CMake doesn't
    # crash, it won't actually be called
    set(cplusplus_compiler ${CMAKE_C_COMPILER})
  endif()
endif()
set(CMAKE_CXX_COMPILER ${cplusplus_compiler} CACHE INTERNAL " " FORCE)

View file

@ -9,7 +9,11 @@ set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
# Configure the toolchain based on what SDK/toolchain is in use.
# The POSIX arch is always compiled with the host toolchain
if(ZEPHYR_GCC_VARIANT STREQUAL "host" OR CONFIG_ARCH_POSIX)
  set(COMPILER host-gcc)
else()
  include($ENV{ZEPHYR_BASE}/cmake/toolchain-${ZEPHYR_GCC_VARIANT}.cmake)
endif()
# Configure the toolchain based on what toolchain technology is used
# (gcc clang etc.)

View file

@ -21,6 +21,8 @@
#include <arch/riscv32/arch.h>
#elif defined(CONFIG_XTENSA)
#include <arch/xtensa/arch.h>
#elif defined(CONFIG_ARCH_POSIX)
#include <arch/posix/arch.h>
#else
#error "Unknown Architecture"
#endif

63
include/arch/posix/arch.h Normal file
View file

@ -0,0 +1,63 @@
/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief POSIX arch specific kernel interface header
 * This header contains the POSIX arch specific kernel interface.
 * It is included by the generic kernel interface header (include/arch/cpu.h)
 *
 */

#ifndef _ARCH_IFACE_H
#define _ARCH_IFACE_H

#include <toolchain.h>
#include <irq.h>
#include <arch/posix/asm_inline.h>
#include <board_irq.h> /* Each board must define this */
#include <sw_isr_table.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Stack alignment, in bytes */
#define STACK_ALIGN 4
#define STACK_ALIGN_SIZE 4

/* Identity conversions: stack size units are octets on this arch */
#define OCTET_TO_SIZEOFUNIT(X) (X)
#define SIZEOFUNIT_TO_OCTET(X) (X)

/* Fatal error reason codes */
#define _NANO_ERR_CPU_EXCEPTION (0)      /* Any unhandled exception */
#define _NANO_ERR_INVALID_TASK_EXIT (1)  /* Invalid task exit */
#define _NANO_ERR_STACK_CHK_FAIL (2)     /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3)    /* Kernel Allocation Failure */
#define _NANO_ERR_SPURIOUS_INT (4)       /* Spurious interrupt */
#define _NANO_ERR_KERNEL_OOPS (5)        /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (6)       /* Kernel panic (fatal to system) */

/* Exception stack frame: nothing to save on this arch (yet) */
struct __esf {
	u32_t dummy; /* maybe we will want to add something someday */
};

typedef struct __esf NANO_ESF;
extern const NANO_ESF _default_esf;

/* The cycle counter comes from the timer driver */
extern u32_t _timer_cycle_get_32(void);
#define _arch_k_cycle_get_32() _timer_cycle_get_32()

FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
		const NANO_ESF *esf);
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
		const NANO_ESF *esf);

#ifdef __cplusplus
}
#endif

#endif /* _ARCH_IFACE_H */

View file

@ -0,0 +1,23 @@
/* POSIX inline "assembler" functions and macros for public functions */

/*
 * Copyright (c) 2015, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _ASM_INLINE_PUBLIC_H
#define _ASM_INLINE_PUBLIC_H

/*
 * The file must not be included directly
 * Include kernel.h instead
 */

/* The implementations rely on GCC __builtin_* intrinsics
 * (see asm_inline_gcc.h), so only GNU-C-compatible compilers work
 */
#if defined(__GNUC__)
#include <arch/posix/asm_inline_gcc.h>
#else
#error "Only a compiler with GNU C extensions is supported for the POSIX arch"
#endif

#endif /* _ASM_INLINE_PUBLIC_H */

View file

@ -0,0 +1,198 @@
/*
* Copyright (c) 2015, Wind River Systems, Inc.
* Copyright (c) 2017, Oticon A/S
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* POSIX ARCH specific public inline "assembler" functions and macros
*/
/* Either public functions or macros or invoked by public functions */
#ifndef _ASM_INLINE_GCC_PUBLIC_GCC_H_INCLUDE_ARCH_POSIX
#define _ASM_INLINE_GCC_PUBLIC_GCC_H_INCLUDE_ARCH_POSIX
/*
* The file must not be included directly
* Include kernel.h instead
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
#include <toolchain/common.h>
#include <zephyr/types.h>
#include <sys_io.h>
#include "posix_soc_if.h"
/**
 *
 * @brief find most significant bit set in a 32-bit word
 *
 * Returns the 1-based index (counted from the least significant bit) of the
 * highest bit set in @a op. Bits are numbered starting at 1 from the least
 * significant bit; a return value of zero indicates that @a op was zero.
 *
 * @return most significant bit set, 0 if @a op is 0
 */
static ALWAYS_INLINE unsigned int find_msb_set(u32_t op)
{
	/* __builtin_clz() is undefined for 0, so that case is kept apart */
	return (op == 0) ? 0 : (32 - __builtin_clz(op));
}
/**
 *
 * @brief find least significant bit set in a 32-bit word
 *
 * This routine finds the first bit set starting from the least significant bit
 * in the argument passed in and returns the index of that bit. Bits are
 * numbered starting at 1 from the least significant bit. A return value of
 * zero indicates that the value passed is zero.
 *
 * @return least significant bit set, 0 if @a op is 0
 */
static ALWAYS_INLINE unsigned int find_lsb_set(u32_t op)
{
	/* __builtin_ffs() implements exactly this contract */
	return __builtin_ffs(op);
}
/* Memory-mapped register I/O: on this arch these are plain volatile
 * memory accesses of the given width
 */

static ALWAYS_INLINE u8_t sys_read8(mem_addr_t addr)
{
	return *(volatile u8_t *)addr;
}

static ALWAYS_INLINE void sys_write8(u8_t data, mem_addr_t addr)
{
	*(volatile u8_t *)addr = data;
}

static ALWAYS_INLINE u16_t sys_read16(mem_addr_t addr)
{
	return *(volatile u16_t *)addr;
}

static ALWAYS_INLINE void sys_write16(u16_t data, mem_addr_t addr)
{
	*(volatile u16_t *)addr = data;
}

static ALWAYS_INLINE u32_t sys_read32(mem_addr_t addr)
{
	return *(volatile u32_t *)addr;
}

static ALWAYS_INLINE void sys_write32(u32_t data, mem_addr_t addr)
{
	*(volatile u32_t *)addr = data;
}
/* Memory bit manipulation functions.
 * 1U (instead of 1) avoids undefined behavior from a signed left shift
 * when bit == 31.
 */

static ALWAYS_INLINE void sys_set_bit(mem_addr_t addr, unsigned int bit)
{
	u32_t temp = *(volatile u32_t *)addr;

	*(volatile u32_t *)addr = temp | (1U << bit);
}

static ALWAYS_INLINE void sys_clear_bit(mem_addr_t addr, unsigned int bit)
{
	u32_t temp = *(volatile u32_t *)addr;

	*(volatile u32_t *)addr = temp & ~(1U << bit);
}

static ALWAYS_INLINE int sys_test_bit(mem_addr_t addr, unsigned int bit)
{
	u32_t temp = *(volatile u32_t *)addr;

	/* Non-zero when the bit is set (callers must not rely on the exact
	 * value)
	 */
	return temp & (1U << bit);
}
static ALWAYS_INLINE
void sys_bitfield_set_bit(mem_addr_t addr, unsigned int bit)
{
	/* Doing memory offsets in terms of 32-bit values to prevent
	 * alignment issues
	 * (bit >> 5 selects the 32-bit word, bit & 0x1F the bit within it)
	 */
	sys_set_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}

static ALWAYS_INLINE
void sys_bitfield_clear_bit(mem_addr_t addr, unsigned int bit)
{
	/* Same word/bit decomposition as sys_bitfield_set_bit() */
	sys_clear_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}

static ALWAYS_INLINE
int sys_bitfield_test_bit(mem_addr_t addr, unsigned int bit)
{
	return sys_test_bit(addr + ((bit >> 5) << 2), bit & 0x1F);
}
/* The following helpers return the previous value of the bit and then
 * modify it. The read and the write are two separate accesses: nothing in
 * this file makes them atomic.
 */

static ALWAYS_INLINE
int sys_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	ret = sys_test_bit(addr, bit);
	sys_set_bit(addr, bit);

	return ret;
}

static ALWAYS_INLINE
int sys_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	ret = sys_test_bit(addr, bit);
	sys_clear_bit(addr, bit);

	return ret;
}

static ALWAYS_INLINE
int sys_bitfield_test_and_set_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	ret = sys_bitfield_test_bit(addr, bit);
	sys_bitfield_set_bit(addr, bit);

	return ret;
}

static ALWAYS_INLINE
int sys_bitfield_test_and_clear_bit(mem_addr_t addr, unsigned int bit)
{
	int ret;

	ret = sys_bitfield_test_bit(addr, bit);
	sys_bitfield_clear_bit(addr, bit);

	return ret;
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _ASM_INLINE_GCC_PUBLIC_GCC_H_INCLUDE_ARCH_POSIX */

View file

@ -0,0 +1,37 @@
/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2017 Oticon A/S
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Linker command/script file
 *
 * Linker script for the POSIX arch (native host build)
 */

#define _LINKER
#define _ASMLANGUAGE

#include <autoconf.h>
#include <linker/sections.h>

#include <linker/linker-defs.h>
#include <linker/linker-tool.h>

SECTIONS
{
/* Ideally we would have here the platform default linker script SECTIONS */
/* content */

#include <linker/common-rom.ld>
#include <linker/common-ram.ld>

	__data_ram_end = .;
}

View file

@ -36,6 +36,8 @@
/* Nothing yet to include */
#elif defined(CONFIG_XTENSA)
/* Nothing yet to include */
#elif defined(CONFIG_ARCH_POSIX)
/* Nothing yet to include */
#else
#error Arch not supported.
#endif

View file

@ -34,6 +34,8 @@
OUTPUT_FORMAT("elf32-littleriscv")
#elif defined(CONFIG_XTENSA)
/* Not needed */
#elif defined(CONFIG_ARCH_POSIX)
/* Not needed */
#else
#error Arch not supported.
#endif
@ -51,7 +53,11 @@
* description and tells the linker that this section is located in
* the memory area specified by <where> argument.
*/
#if defined(CONFIG_ARCH_POSIX)
#define GROUP_LINK_IN(where)
#else
#define GROUP_LINK_IN(where) > where
#endif
/*
* As GROUP_LINK_IN(), but takes a second argument indicating the
@ -64,11 +70,15 @@
* section, specifying the same memory region (e.g. "RAM") for both
* vregion and lregion.
*/
#if defined(CONFIG_ARCH_POSIX)
#define GROUP_DATA_LINK_IN(vregion, lregion)
#else
#ifdef CONFIG_XIP
#define GROUP_DATA_LINK_IN(vregion, lregion) > vregion AT> lregion
#else
#define GROUP_DATA_LINK_IN(vregion, lregion) > vregion
#endif
#endif /*CONFIG_ARCH_POSIX*/
/*
* The GROUP_FOLLOWS_AT() macro is located at the end of the section
@ -76,7 +86,11 @@
* it is to be loaded, but that it follows a section which did specify
* such an address
*/
#ifdef CONFIG_ARCH_POSIX
#define GROUP_FOLLOWS_AT(where)
#else
#define GROUP_FOLLOWS_AT(where) AT > where
#endif
/*
* The SECTION_PROLOGUE() macro is used to define the beginning of a section.

View file

@ -76,6 +76,8 @@
defined(CONFIG_XTENSA)
#define PERFOPT_ALIGN .balign 4
#elif defined(CONFIG_ARCH_POSIX)
#else
#error Architecture unsupported

View file

@ -27,7 +27,17 @@
#define FUNC_ALIAS(real_func, new_alias, return_type) \
return_type new_alias() ALIAS_OF(real_func)
#if defined(CONFIG_ARCH_POSIX)
/*let's not segfault if this were to happen for some reason*/
#define CODE_UNREACHABLE \
{\
posix_print_error_and_exit("CODE_UNREACHABLE reached from %s:%d\n",\
__FILE__, __LINE__);\
__builtin_unreachable(); \
}
#else
#define CODE_UNREACHABLE __builtin_unreachable()
#endif
#define FUNC_NORETURN __attribute__((__noreturn__))
/* The GNU assembler for Cortex-M3 uses # for immediate values, not
@ -309,6 +319,11 @@ A##a:
",%0" \
"\n\t.type\t" #name ",%%object" : : "n"(value))
#elif defined(CONFIG_ARCH_POSIX)
#define GEN_ABSOLUTE_SYM(name, value) \
__asm__(".globl\t" #name "\n\t.equ\t" #name \
",%c0" \
"\n\t.type\t" #name ",@object" : : "n"(value))
#else
#error processor architecture not supported
#endif