userspace: remove APPLICATION_MEMORY feature
This was never intended as a long-term solution; it was a gross hack to keep test cases working until we had a good end-to-end solution for memory domains that generated appropriate linker sections. Now that the app shared memory feature provides this, and all tests have been converted away from APPLICATION_MEMORY, delete the feature. To date all userspace APIs have been tagged as 'experimental', which sidesteps deprecation policies.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
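For anyone still converting out-of-tree code, a minimal sketch of the replacement pattern is below. It assumes CONFIG_USERSPACE and CONFIG_APP_SHARED_MEM are enabled; the partition and variable names are illustrative and not taken from this commit.

#include <kernel.h>
#include <app_memory/app_memdomain.h>

/* Defines a partition backed by a generated data_smem_* linker section. */
K_APPMEM_PARTITION_DEFINE(my_partition);

/* Initialized and zero-initialized data that user threads may access. */
K_APP_DMEM(my_partition) int shared_config = 1;
K_APP_BMEM(my_partition) char shared_buf[64];

/* Kernel objects drop the __kernel tag; they live in kernel memory by default. */
struct k_sem my_sem;

Kernel objects themselves now simply reside in kernel memory, so the __kernel tags removed throughout this patch need no replacement.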
parent 525065dd8b
commit 41f6011c36

CMakeLists.txt | 151
@@ -341,12 +341,9 @@ zephyr_cc_option(-Wpointer-arith)
# Declare MPU userspace dependencies before the linker scripts to make
# sure the order of dependencies are met
if(CONFIG_APP_SHARED_MEM AND CONFIG_USERSPACE)
if(CONFIG_USERSPACE)
if(CONFIG_APP_SHARED_MEM)
set(APP_SMEM_DEP app_smem_linker)
endif()
if(CONFIG_CPU_HAS_MPU AND CONFIG_USERSPACE)
if(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT AND CONFIG_ARC AND CONFIG_APPLICATION_MEMORY)
set(ALIGN_SIZING_DEP app_sizing_prebuilt linker_app_sizing_script)
endif()
if(CONFIG_ARM)
set(PRIV_STACK_DEP priv_stacks_prebuilt)

@@ -745,101 +742,6 @@ endforeach()
get_property(OUTPUT_FORMAT GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT)

if(CONFIG_APPLICATION_MEMORY)
# Objects default to being in kernel space, and then we exclude
# certain items.
set(kernel_object_file_list
${ZEPHYR_LIBS_PROPERTY}
kernel
)
list(
REMOVE_ITEM
kernel_object_file_list
app
)

# The zephyr libraries in zephyr/lib/ and zephyr/test/ belong in
# userspace.

# NB: The business logic for determing what source files are in
# kernel space and what source files are in user space is
# fragile. Fix ASAP.
#
# The intended design is that certain directories are designated as
# containing userspace code and others for kernel space code. The
# implementation we have however is not working on directories of
# code, it is working on zephyr libraries. It is exploiting the fact
# that zephyr libraries follow a naming scheme as described in
# extensions.cmake:zephyr_library_get_current_dir_lib_name
#
# But code from test/ and lib/ that is placed in the "zephyr"
# library (with zephyr_sources()) will not be in a library that is
# prefixed with lib__ or test__ and will end up in the wrong address
# space.
set(application_space_dirs
lib
tests
)
foreach(f ${kernel_object_file_list})
foreach(app_dir ${application_space_dirs})
if(${f} MATCHES "^${app_dir}__") # Begins with ${app_dir}__, e.g. lib__libc
list(
REMOVE_ITEM
kernel_object_file_list
${f}
)
endif()
endforeach()
endforeach()

# Create a list ks, with relative paths to kernel space libs.
foreach(f ${kernel_object_file_list})
get_target_property(target_name ${f} NAME)
get_target_property(target_binary_dir ${f} BINARY_DIR)

string(REPLACE
${PROJECT_BINARY_DIR}
""
fixed_path
${target_binary_dir}
)

# Append / if not empty
if(fixed_path)
set(fixed_path "${fixed_path}/")
endif()

# Cut off leading / if present
if(fixed_path MATCHES "^/.+")
string(SUBSTRING ${fixed_path} 1 -1 fixed_path)
endif()

set(fixed_path "${fixed_path}lib${target_name}.a")

if(CMAKE_GENERATOR STREQUAL "Ninja")
# Ninja invokes the linker from the root of the build directory
# (APPLICATION_BINARY_DIR) instead of from the build/zephyr
# directory (PROJECT_BINARY_DIR). So for linker-defs.h to get
# the correct path we need to prefix with zephyr/.
set(fixed_path "zephyr/${fixed_path}")
endif()

list(APPEND ks ${fixed_path})
endforeach()

# We are done constructing kernel_object_file_list, now we inject
# this list into the linker script through the define
# KERNELSPACE_OBJECT_FILES
set(def -DKERNELSPACE_OBJECT_FILES=)
foreach(f ${ks})
set(def "${def} ${f}")
endforeach()
set_property(GLOBAL APPEND PROPERTY
PROPERTY_LINKER_SCRIPT_DEFINES
${def}
)
endif() # CONFIG_APPLICATION_MEMORY

if (CONFIG_CODE_DATA_RELOCATION)
set(CODE_RELOCATION_DEP code_relocation_source_lib)
endif() # CONFIG_CODE_DATA_RELOCATION

@@ -1244,52 +1146,7 @@ if(CONFIG_APP_SHARED_MEM AND CONFIG_USERSPACE)
)
endif()

if(CONFIG_CPU_HAS_MPU AND CONFIG_USERSPACE)

if(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT AND CONFIG_ARC AND CONFIG_APPLICATION_MEMORY)

construct_add_custom_command_for_linker_pass(linker_app_sizing custom_command)
add_custom_command(
${custom_command}
)

add_custom_target(
linker_app_sizing_script
DEPENDS
linker_app_sizing.cmd
${OFFSETS_H_TARGET}
${APP_SMEM_DEP}
${CODE_RELOCATION_DEP}
)

set_property(TARGET
linker_app_sizing_script
PROPERTY INCLUDE_DIRECTORIES
${ZEPHYR_INCLUDE_DIRS}
)

# For systems with MPUs, the size of the application data section must
# be determined so that MPU alignment requirements can be met.
# Create a app_sizing_prebuilt target so we can do this before the
# other ELF files are built
set(GEN_APP_ALIGN $ENV{ZEPHYR_BASE}/scripts/gen_alignment_script.py)
add_executable( app_sizing_prebuilt misc/empty_file.c)
target_link_libraries(app_sizing_prebuilt ${TOPT} ${PROJECT_BINARY_DIR}/linker_app_sizing.cmd ${zephyr_lnk} ${CODE_RELOCATION_DEP})
set_property(TARGET app_sizing_prebuilt PROPERTY LINK_DEPENDS ${PROJECT_BINARY_DIR}/linker_app_sizing.cmd)
add_dependencies( app_sizing_prebuilt linker_app_sizing_script ${OFFSETS_LIB} ${CODE_RELOCATION_DEP} )

add_custom_command(
TARGET app_sizing_prebuilt
POST_BUILD
COMMAND ${PYTHON_EXECUTABLE} ${GEN_APP_ALIGN}
--output ./include/generated/app_data_alignment.ld
--kernel $<TARGET_FILE:app_sizing_prebuilt>
VERBATIM
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/
)
endif()

if(CONFIG_ARM)
if(CONFIG_USERSPACE AND CONFIG_ARM)
construct_add_custom_command_for_linker_pass(linker_priv_stacks custom_command)
add_custom_command(
${custom_command}

@@ -1317,8 +1174,6 @@ if(CONFIG_ARM)
add_dependencies( priv_stacks_prebuilt ${ALIGN_SIZING_DEP} linker_priv_stacks_script ${OFFSETS_LIB})
endif()

endif()

# FIXME: Is there any way to get rid of empty_file.c?
add_executable( ${ZEPHYR_PREBUILT_EXECUTABLE} misc/empty_file.c)
target_link_libraries(${ZEPHYR_PREBUILT_EXECUTABLE} ${TOPT} ${PROJECT_BINARY_DIR}/linker.cmd ${PRIV_STACK_LIB} ${zephyr_lnk} ${CODE_RELOCATION_DEP})
@@ -447,51 +447,19 @@ void arc_core_mpu_configure_user_context(struct k_thread *thread)
/* for kernel threads, no need to configure user context */
if (!(thread->base.user_options & K_USER)) {
#if defined(CONFIG_APP_SHARED_MEM) && CONFIG_ARC_MPU_VER == 3
/*
 * APP_SHARED_MEM is handled here, all privileged threads have the right
 * to access it. APPLICATION_MEMORY will be handled as a static memory region
 */
/* APP_SHARED_MEM is handled here, all privileged threads have
 * the right to access it.
 */
base = (u32_t)&_app_smem_start;
size = (u32_t)&_app_smem_size;
_region_init(_get_region_index_by_type(THREAD_APP_DATA_REGION)
, base, size, _get_region_attr_by_type(THREAD_APP_DATA_REGION));
_region_init(_get_region_index_by_type(THREAD_APP_DATA_REGION),
base, size,
_get_region_attr_by_type(THREAD_APP_DATA_REGION));
#endif
return;
}

arc_core_mpu_configure(THREAD_STACK_USER_REGION, base, size);

/* configure app data portion */
#ifdef CONFIG_APPLICATION_MEMORY
#if CONFIG_ARC_MPU_VER == 2
/*
 * _app_ram_size is guaranteed to be power of two, and
 * _app_ram_start is guaranteed to be aligned _app_ram_size
 * in linker template
 */
base = (u32_t)&__app_ram_start;
size = (u32_t)&__app_ram_size;

/* set up app data region if exists, otherwise disable */
if (size > 0) {
arc_core_mpu_configure(THREAD_APP_DATA_REGION, base, size);
}
#elif CONFIG_ARC_MPU_VER == 3
/*
 * ARC MPV v3 doesn't support MPU region overlap.
 * Application memory should be a static memory, defined in mpu_config
 *
 * here, need to clear THREAD_APP_DATA_REGION for user thread as it will
 * be set by kernel thread to to access app_shared mem. For user thread
 * the handling of app_shared mem is done by
 * THREAD_DOMAIN_PARTITION_REGION
 */
#if defined(CONFIG_APP_SHARED_MEM)
_region_init(_get_region_index_by_type(THREAD_APP_DATA_REGION)
, 0, 0, 0);
#endif
#endif
#endif
}

/**
@@ -65,13 +65,6 @@ void _arch_configure_static_mpu_regions(void)
 * MPU regions.
 */
const struct k_mem_partition static_regions[] = {
#if defined(CONFIG_APPLICATION_MEMORY)
{
.start = (u32_t)&__app_ram_start,
.size = (u32_t)&__app_ram_end - (u32_t)&__app_ram_start,
.attr = K_MEM_PARTITION_P_RW_U_RW,
},
#endif /* CONFIG_APPLICATION_MEMORY */
#if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
{
.start = (u32_t)&__gcov_bss_start,

@@ -105,13 +98,6 @@ void _arch_configure_static_mpu_regions(void)
 * initialization.
 */
const struct k_mem_partition dyn_region_areas[] = {
#if defined(CONFIG_APPLICATION_MEMORY)
/* Dynamic areas are also allowed in Application Memory. */
{
.start = (u32_t)&__app_ram_start,
.size = (u32_t)&__app_ram_end - (u32_t)&__app_ram_start,
},
#endif /* CONFIG_APPLICATION_MEMORY */
{
.start = _MPU_DYNAMIC_REGIONS_AREA_START,
.size = _MPU_DYNAMIC_REGIONS_AREA_SIZE,
@@ -314,14 +314,6 @@ __csSet:
call _x86_data_copy

#ifdef CONFIG_APPLICATION_MEMORY
movl $__app_data_ram_start, %edi /* DATA in RAM (dest) */
movl $__app_data_rom_start, %esi /* DATA in ROM (src) */
movl $__app_data_num_words, %ecx /* Size of DATA in quad bytes */

call _x86_data_copy
#endif /* CONFIG_APPLICATION_MEMORY */

#ifdef CONFIG_APP_SHARED_MEM
movl $_app_smem_start, %edi /* DATA in RAM (dest) */
movl $_app_smem_rom_start, %esi /* DATA in ROM (src) */

@@ -348,12 +340,6 @@ __csSet:
call _x86_bss_zero
#endif /* CONFIG_COVERAGE_GCOV */

#ifdef CONFIG_APPLICATION_MEMORY
movl $__app_bss_start, %edi /* load app BSS start address */
movl $__app_bss_num_words, %ecx /* number of quad bytes */
call _x86_bss_zero
#endif

#ifdef CONFIG_GDT_DYNAMIC
/* activate RAM-based Global Descriptor Table (GDT) */
lgdt %ds:_gdt
@@ -20,11 +20,6 @@
MMU_BOOT_REGION((u32_t)&_image_rom_start, (u32_t)&_image_rom_size,
MMU_ENTRY_READ | MMU_ENTRY_USER);

#ifdef CONFIG_APPLICATION_MEMORY
/* User threads by default can read/write app-level memory. */
MMU_BOOT_REGION((u32_t)&__app_ram_start, (u32_t)&__app_ram_size,
MMU_ENTRY_WRITE | MMU_ENTRY_USER | MMU_ENTRY_EXECUTE_DISABLE);
#endif
#ifdef CONFIG_APP_SHARED_MEM
MMU_BOOT_REGION((u32_t)&_app_smem_start, (u32_t)&_app_smem_size,
MMU_ENTRY_WRITE | MMU_ENTRY_USER | MMU_ENTRY_EXECUTE_DISABLE);
@@ -11,22 +11,6 @@

#ifdef CONFIG_USERSPACE
static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ARC_MPU_VER == 3 && defined(CONFIG_APPLICATION_MEMORY)
/* Region ICCM */
MPU_REGION_ENTRY("IMAGE ROM",
(u32_t) _image_rom_start,
(u32_t) _image_rom_size,
REGION_FLASH_ATTR),
MPU_REGION_ENTRY("APP MEMORY",
(u32_t) __app_ram_start,
(u32_t) __app_ram_size,
REGION_RAM_ATTR),
MPU_REGION_ENTRY("KERNEL MEMORY",
(u32_t) __kernel_ram_start,
(u32_t) __kernel_ram_size,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),

#else
#if DT_ICCM_SIZE > 0
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",

@@ -49,7 +33,6 @@ static struct arc_mpu_region mpu_regions[] = {
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR |
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE),
#endif
#endif /* ARC_MPU_VER == 3 */
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
@@ -11,16 +11,12 @@

#ifdef CONFIG_USERSPACE
static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ARC_MPU_VER == 3 && defined(CONFIG_APPLICATION_MEMORY)
#if CONFIG_ARC_MPU_VER == 3
/* Region ICCM */
MPU_REGION_ENTRY("IMAGE ROM",
(u32_t) _image_rom_start,
(u32_t) _image_rom_size,
REGION_FLASH_ATTR),
MPU_REGION_ENTRY("APP MEMORY",
(u32_t) __app_ram_start,
(u32_t) __app_ram_size,
REGION_RAM_ATTR),
MPU_REGION_ENTRY("KERNEL MEMORY",
(u32_t) __kernel_ram_start,
(u32_t) __kernel_ram_size,
@@ -56,17 +56,8 @@ call APIs, several conditions must be met on how the kernel object is declared:
in a special table of kernel object metadata. Kernel objects may be members
of arrays or embedded within other data structures.

* Kernel objects must be located in memory reserved for the kernel. If
:option:`CONFIG_APPLICATION_MEMORY` is used, all declarations of kernel
objects inside application code must be prefixed with the :c:macro:`__kernel`
attribute so that they are placed in the right memory sections. The APIs for
statically declaring and initializing kernel objects (such as
:c:macro:`K_SEM_DEFINE()`) automatically do this. However, uninitialized
kernel objects need to be tagged like this:

.. code-block:: c

__kernel struct k_sem my_sem;
* Kernel objects must be located in memory reserved for the kernel. They
must not be located in any memory partitions that are user-accessible.

* Any memory reserved for a kernel object must be used exclusively for that
object. Kernel objects may not be members of a union data type.

@@ -232,7 +223,7 @@ are embedded within some larger struct and initialized statically.
...
};

__kernel struct foo my_foo = {
struct foo my_foo = {
.sem = _K_SEM_INITIALIZER(my_foo.sem, 0, 1),
...
};

@@ -274,7 +265,7 @@ Configuration Options
Related configuration options:

* :option:`CONFIG_USERSPACE`
* :option:`CONFIG_APPLICATION_MEMORY`
* :option:`CONFIG_APP_SHARED_MEM`
* :option:`CONFIG_MAX_THREAD_BYTES`

API Reference
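Since kernel objects now always live in kernel memory, user threads reach them only through system calls and only after being granted access. A hedged sketch of that flow follows; the object and thread names are illustrative, not from the documentation above.

#include <kernel.h>

K_SEM_DEFINE(my_sem, 0, 1);   /* statically defined, resides in kernel memory */

void allow_worker(k_tid_t worker)
{
	/* Without this grant, the kernel rejects syscalls on my_sem from worker. */
	k_object_access_grant(&my_sem, worker);
}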
@@ -8,14 +8,6 @@ the threads in the same memory domain and protect sensitive data from threads
outside their domain. Memory domains are not only used for improving security,
but are also useful for debugging (unexpected access would cause an exception).

An alternative to using memory domains is the
:option:`CONFIG_APPLICATION_MEMORY` option, which will grant access to user
threads at boot to all global memory defined in object files that are not
part of the core kernel. This is useful for very simple applications which
will allow all threads to use global data defined within the application, but
each thread's stack is still protected from other user threads and there is
no access to private kernel data structures.

Since architectures generally have constraints on how many partitions can be
defined, and the size/alignment of each partition, users may need to group
related data together using linker sections.
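With APPLICATION_MEMORY gone, sharing application data works exactly as the remaining text describes: group the data into a partition and hand it to user threads through a memory domain. A minimal sketch, assuming CONFIG_APP_SHARED_MEM is enabled; all names are illustrative.

#include <kernel.h>
#include <app_memory/app_memdomain.h>

K_APPMEM_PARTITION_DEFINE(app_part);
K_APP_BMEM(app_part) static char shared_area[256];

static struct k_mem_domain app_domain;

void set_up_domain(k_tid_t user_thread)
{
	struct k_mem_partition *parts[] = { &app_part };

	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
	k_mem_domain_add_thread(&app_domain, user_thread);
}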
@@ -129,9 +129,6 @@ through the following mechanisms:
by having shared membership of the same memory domains, or via kernel objects
such as semaphores and pipes.

- If the optional :option:`CONFIG_APPLICATION_MEMORY` feature is enabled, all
threads will have read/write access to non-kernel globals.

- User threads cannot directly access memory belonging to kernel objects.
Although pointers to kernel objects are used to reference them, actual
manipulation of kernel objects is done through system call interfaces. Device
@ -84,7 +84,7 @@ extern "C" {
|
|||
#if CONFIG_ARC_MPU_VER == 2
|
||||
|
||||
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
|
||||
sym[POW2_CEIL(STACK_SIZE_ALIGN(size)) + \
|
||||
+ STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE]
|
||||
|
@ -95,7 +95,7 @@ extern "C" {
|
|||
POW2_CEIL(STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)))
|
||||
|
||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \
|
||||
sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
|
@ -108,7 +108,7 @@ extern "C" {
|
|||
#elif CONFIG_ARC_MPU_VER == 3
|
||||
|
||||
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
|
||||
struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
|
||||
sym[size + \
|
||||
+ STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE]
|
||||
|
||||
|
@ -116,7 +116,7 @@ extern "C" {
|
|||
((size) + STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)
|
||||
|
||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
|
||||
struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
|
||||
sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
|
||||
|
@ -135,13 +135,13 @@ extern "C" {
|
|||
#else /* CONFIG_USERSPACE */
|
||||
|
||||
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
|
||||
struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
|
||||
sym[size + STACK_GUARD_SIZE]
|
||||
|
||||
#define _ARCH_THREAD_STACK_LEN(size) ((size) + STACK_GUARD_SIZE)
|
||||
|
||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
|
||||
struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
|
||||
sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
|
||||
|
|
|
@ -149,52 +149,6 @@ SECTIONS {
|
|||
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
|
||||
#endif /* CONFIG_APP_SHARED_MEM */
|
||||
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),)
|
||||
{
|
||||
MPU_MIN_SIZE_ALIGN
|
||||
#include <app_data_alignment.ld>
|
||||
|
||||
__app_ram_start = .;
|
||||
__app_data_ram_start = .;
|
||||
_image_ram_start = .;
|
||||
APP_INPUT_SECTION(.data)
|
||||
APP_INPUT_SECTION(".data.*")
|
||||
__app_data_ram_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
|
||||
|
||||
__app_data_rom_start = LOADADDR(_APP_DATA_SECTION_NAME);
|
||||
|
||||
SECTION_PROLOGUE(_APP_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
__app_bss_start = .;
|
||||
APP_INPUT_SECTION(.bss)
|
||||
APP_INPUT_SECTION(".bss.*")
|
||||
APP_INPUT_SECTION(COMMON)
|
||||
__app_bss_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
|
||||
__app_bss_num_words = (__app_bss_end - __app_bss_start) >> 2;
|
||||
|
||||
SECTION_PROLOGUE(_APP_NOINIT_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
APP_INPUT_SECTION(.noinit)
|
||||
APP_INPUT_SECTION(".noinit.*")
|
||||
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
|
||||
__app_last_address_used = .;
|
||||
|
||||
/* Pad out application ram area to make MPU friendly */
|
||||
SECTION_PROLOGUE(app_pad, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
. = ALIGN(_app_data_align);
|
||||
MPU_MIN_SIZE_ALIGN
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
__app_ram_end = .;
|
||||
__app_ram_size = __app_ram_end - __app_ram_start;
|
||||
#endif /* CONFIG_APPLICATION_MEMORY */
|
||||
|
||||
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),) {
|
||||
MPU_MIN_SIZE_ALIGN
|
||||
/*
|
||||
|
@ -203,14 +157,11 @@ SECTIONS {
|
|||
*/
|
||||
. = ALIGN(4);
|
||||
__bss_start = .;
|
||||
|
||||
#ifndef CONFIG_APPLICATION_MEMORY
|
||||
_image_ram_start = .;
|
||||
#endif
|
||||
__kernel_ram_start = .;
|
||||
KERNEL_INPUT_SECTION(.bss)
|
||||
KERNEL_INPUT_SECTION(".bss.*")
|
||||
KERNEL_INPUT_SECTION(COMMON)
|
||||
*(.bss)
|
||||
*(".bss.*")
|
||||
*(COMMON)
|
||||
*(".kernel_bss.*")
|
||||
|
||||
/*
|
||||
|
@ -225,8 +176,8 @@ SECTIONS {
|
|||
* This section is used for non-initialized objects that
|
||||
* will not be cleared during the boot process.
|
||||
*/
|
||||
KERNEL_INPUT_SECTION(.noinit)
|
||||
KERNEL_INPUT_SECTION(".noinit.*")
|
||||
*(.noinit)
|
||||
*(".noinit.*")
|
||||
*(".kernel_noinit.*")
|
||||
|
||||
#ifdef CONFIG_SOC_NOINIT_LD
|
||||
|
@ -239,8 +190,8 @@ SECTIONS {
|
|||
|
||||
/* when XIP, .text is in ROM, but vector table must be at start of .data */
|
||||
__data_ram_start = .;
|
||||
KERNEL_INPUT_SECTION(.data)
|
||||
KERNEL_INPUT_SECTION(".data.*")
|
||||
*(.data)
|
||||
*(".data.*")
|
||||
*(".kernel.*")
|
||||
|
||||
#ifdef CONFIG_SOC_RWDATA_LD
|
||||
|
|
|
@ -143,11 +143,11 @@ extern "C" {
|
|||
#if defined(CONFIG_USERSPACE) && \
|
||||
defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
|
||||
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)]
|
||||
#else
|
||||
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
|
||||
struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \
|
||||
sym[size+MPU_GUARD_ALIGN_AND_SIZE]
|
||||
#endif
|
||||
|
||||
|
@ -184,12 +184,12 @@ extern "C" {
|
|||
#if defined(CONFIG_USERSPACE) && \
|
||||
defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
|
||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(POW2_CEIL(size)) \
|
||||
sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
|
||||
#else
|
||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(STACK_ALIGN) \
|
||||
sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
|
||||
#endif
|
||||
|
|
|
@ -354,55 +354,14 @@ SECTIONS
|
|||
|
||||
#endif /* CONFIG_COVERAGE_GCOV */
|
||||
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),)
|
||||
{
|
||||
|
||||
MPU_ALIGN(__app_ram_size);
|
||||
__app_ram_start = .;
|
||||
__app_data_ram_start = .;
|
||||
APP_INPUT_SECTION(.data)
|
||||
APP_INPUT_SECTION(".data.*")
|
||||
__app_data_ram_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
|
||||
|
||||
__app_data_rom_start = LOADADDR(_APP_DATA_SECTION_NAME);
|
||||
|
||||
SECTION_PROLOGUE(_APP_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
__app_bss_start = .;
|
||||
APP_INPUT_SECTION(.bss)
|
||||
APP_INPUT_SECTION(".bss.*")
|
||||
APP_INPUT_SECTION(COMMON)
|
||||
__app_bss_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
|
||||
__app_bss_num_words = (__app_bss_end - __app_bss_start) >> 2;
|
||||
|
||||
SECTION_PROLOGUE(_APP_NOINIT_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
APP_INPUT_SECTION(.noinit)
|
||||
APP_INPUT_SECTION(".noinit.*")
|
||||
|
||||
__app_last_address_used = .;
|
||||
|
||||
/* Align the end of the application memory
|
||||
* with MPU alignment requirement.
|
||||
*/
|
||||
MPU_ALIGN(__app_ram_size);
|
||||
__app_ram_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
__app_ram_size = __app_ram_end - __app_ram_start;
|
||||
#endif /* CONFIG_APPLICATION_MEMORY */
|
||||
|
||||
#if defined(CONFIG_NOCACHE_MEMORY)
|
||||
/* Non-cached region of RAM */
|
||||
SECTION_PROLOGUE(_NOCACHE_SECTION_NAME,(NOLOAD),)
|
||||
{
|
||||
MPU_ALIGN(_nocache_ram_size);
|
||||
_nocache_ram_start = .;
|
||||
KERNEL_INPUT_SECTION(.nocache)
|
||||
KERNEL_INPUT_SECTION(".nocache.*")
|
||||
*(.nocache)
|
||||
*(".nocache.*")
|
||||
MPU_ALIGN(_nocache_ram_size);
|
||||
_nocache_ram_end = .;
|
||||
} GROUP_LINK_IN(RAMABLE_REGION)
|
||||
|
@ -429,9 +388,9 @@ SECTIONS
|
|||
__bss_start = .;
|
||||
__kernel_ram_start = .;
|
||||
|
||||
KERNEL_INPUT_SECTION(.bss)
|
||||
KERNEL_INPUT_SECTION(".bss.*")
|
||||
KERNEL_INPUT_SECTION(COMMON)
|
||||
*(.bss)
|
||||
*(".bss.*")
|
||||
*(COMMON)
|
||||
*(".kernel_bss.*")
|
||||
|
||||
/*
|
||||
|
@ -447,8 +406,8 @@ SECTIONS
|
|||
* This section is used for non-initialized objects that
|
||||
* will not be cleared during the boot process.
|
||||
*/
|
||||
KERNEL_INPUT_SECTION(.noinit)
|
||||
KERNEL_INPUT_SECTION(".noinit.*")
|
||||
*(.noinit)
|
||||
*(".noinit.*")
|
||||
*(".kernel_noinit.*")
|
||||
|
||||
#ifdef CONFIG_SOC_NOINIT_LD
|
||||
|
@ -460,8 +419,8 @@ SECTIONS
|
|||
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
|
||||
{
|
||||
__data_ram_start = .;
|
||||
KERNEL_INPUT_SECTION(.data)
|
||||
KERNEL_INPUT_SECTION(".data.*")
|
||||
*(.data)
|
||||
*(".data.*")
|
||||
*(".kernel.*")
|
||||
|
||||
#ifdef CONFIG_SOC_RWDATA_LD
|
||||
|
|
|
@ -607,7 +607,7 @@ extern struct task_state_segment _main_tss;
|
|||
#endif
|
||||
|
||||
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(_STACK_BASE_ALIGN) \
|
||||
sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + _STACK_GUARD_SIZE]
|
||||
|
||||
|
@ -617,7 +617,7 @@ extern struct task_state_segment _main_tss;
|
|||
_STACK_GUARD_SIZE)
|
||||
|
||||
#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __kernel_noinit \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(_STACK_BASE_ALIGN) \
|
||||
sym[nmemb][_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
|
|
|
@ -193,7 +193,7 @@ SECTIONS
|
|||
{
|
||||
MMU_PAGE_ALIGN
|
||||
__gcov_bss_start = .;
|
||||
KERNEL_INPUT_SECTION(".bss.__gcov0.*");
|
||||
*(".bss.__gcov0.*");
|
||||
. = ALIGN(4);
|
||||
MMU_PAGE_ALIGN
|
||||
__gcov_bss_end = .;
|
||||
|
@ -217,50 +217,10 @@ SECTIONS
|
|||
_app_smem_num_words = _app_smem_size >> 2;
|
||||
#endif /* CONFIG_APP_SHARED_MEM */
|
||||
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),)
|
||||
{
|
||||
#ifndef CONFIG_XIP
|
||||
MMU_PAGE_ALIGN
|
||||
#endif
|
||||
#if !defined(CONFIG_APP_SHARED_MEM)
|
||||
_image_ram_start = .;
|
||||
#endif
|
||||
__app_ram_start = .;
|
||||
__app_data_ram_start = .;
|
||||
APP_INPUT_SECTION(.data)
|
||||
APP_INPUT_SECTION(".data.*")
|
||||
__app_data_ram_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
|
||||
|
||||
__app_data_rom_start = LOADADDR(_APP_DATA_SECTION_NAME);
|
||||
|
||||
SECTION_PROLOGUE(_APP_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
__app_bss_start = .;
|
||||
APP_INPUT_SECTION(.bss)
|
||||
APP_INPUT_SECTION(".bss.*")
|
||||
APP_INPUT_SECTION(COMMON)
|
||||
__app_bss_end = .;
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
|
||||
__app_bss_num_words = (__app_bss_end - __app_bss_start) >> 2;
|
||||
|
||||
SECTION_PROLOGUE(_APP_NOINIT_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
APP_INPUT_SECTION(.noinit)
|
||||
APP_INPUT_SECTION(".noinit.*")
|
||||
MMU_PAGE_ALIGN
|
||||
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
|
||||
|
||||
__app_ram_end = .;
|
||||
__app_ram_size = __app_ram_end - __app_ram_start;
|
||||
#endif /* CONFIG_APPLICATION_MEMORY */
|
||||
|
||||
SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
|
||||
{
|
||||
MMU_PAGE_ALIGN
|
||||
#if !defined(CONFIG_APP_SHARED_MEM) && !defined(CONFIG_APPLICATION_MEMORY)
|
||||
#if !defined(CONFIG_APP_SHARED_MEM)
|
||||
_image_ram_start = .;
|
||||
#endif
|
||||
/*
|
||||
|
@ -271,9 +231,9 @@ SECTIONS
|
|||
__kernel_ram_start = .;
|
||||
__bss_start = .;
|
||||
|
||||
KERNEL_INPUT_SECTION(.bss)
|
||||
KERNEL_INPUT_SECTION(".bss.*")
|
||||
KERNEL_INPUT_SECTION(COMMON)
|
||||
*(.bss)
|
||||
*(".bss.*")
|
||||
*(COMMON)
|
||||
*(".kernel_bss.*")
|
||||
|
||||
/*
|
||||
|
@ -292,8 +252,8 @@ SECTIONS
|
|||
* This section is used for non-initialized objects that
|
||||
* will not be cleared during the boot process.
|
||||
*/
|
||||
KERNEL_INPUT_SECTION(.noinit)
|
||||
KERNEL_INPUT_SECTION(".noinit.*")
|
||||
*(.noinit)
|
||||
*(".noinit.*")
|
||||
*(".kernel_noinit.*")
|
||||
|
||||
#ifdef CONFIG_SOC_NOINIT_LD
|
||||
|
@ -309,8 +269,8 @@ SECTIONS
|
|||
|
||||
__data_ram_start = .;
|
||||
|
||||
KERNEL_INPUT_SECTION(.data)
|
||||
KERNEL_INPUT_SECTION(".data.*")
|
||||
*(.data)
|
||||
*(".data.*")
|
||||
*(".kernel.*")
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_INTERRUPTS
|
||||
|
@ -469,9 +429,4 @@ SECTIONS
|
|||
*/
|
||||
__data_size = (__data_ram_end - __data_ram_start);
|
||||
__data_num_words = (__data_size + 3) >> 2;
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
__app_data_size = (__app_data_ram_end - __app_data_ram_start);
|
||||
__app_data_num_words = (__app_data_size + 3) >> 2;
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -956,7 +956,7 @@ struct _static_thread_data {
|
|||
entry, p1, p2, p3, \
|
||||
prio, options, delay) \
|
||||
K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
|
||||
struct k_thread __kernel _k_thread_obj_##name; \
|
||||
struct k_thread _k_thread_obj_##name; \
|
||||
struct _static_thread_data _k_thread_data_##name __aligned(4) \
|
||||
__in_section(_static_thread_data, static, name) = \
|
||||
_THREAD_INITIALIZER(&_k_thread_obj_##name, \
|
||||
|
@ -3213,7 +3213,7 @@ struct k_msgq_attrs {
|
|||
* @req K-MSGQ-001
|
||||
*/
|
||||
#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
|
||||
static char __kernel_noinit __aligned(q_align) \
|
||||
static char __noinit __aligned(q_align) \
|
||||
_k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
|
||||
struct k_msgq q_name \
|
||||
__in_section(_k_msgq, static, q_name) = \
|
||||
|
@ -3674,7 +3674,7 @@ struct k_pipe {
|
|||
* @req K-PIPE-001
|
||||
*/
|
||||
#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
|
||||
static unsigned char __kernel_noinit __aligned(pipe_align) \
|
||||
static unsigned char __noinit __aligned(pipe_align) \
|
||||
_k_pipe_buf_##name[pipe_buffer_size]; \
|
||||
struct k_pipe name \
|
||||
__in_section(_k_pipe, static, name) = \
|
||||
|
@ -4695,11 +4695,11 @@ static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
|
|||
#ifdef _ARCH_MEM_PARTITION_ALIGN_CHECK
|
||||
#define K_MEM_PARTITION_DEFINE(name, start, size, attr) \
|
||||
_ARCH_MEM_PARTITION_ALIGN_CHECK(start, size); \
|
||||
__kernel struct k_mem_partition name =\
|
||||
struct k_mem_partition name =\
|
||||
{ (u32_t)start, size, attr}
|
||||
#else
|
||||
#define K_MEM_PARTITION_DEFINE(name, start, size, attr) \
|
||||
__kernel struct k_mem_partition name =\
|
||||
struct k_mem_partition name =\
|
||||
{ (u32_t)start, size, attr}
|
||||
#endif /* _ARCH_MEM_PARTITION_ALIGN_CHECK */
|
||||
|
||||
|
@ -4716,7 +4716,6 @@ struct k_mem_partition {
|
|||
};
|
||||
|
||||
/* memory domain
|
||||
* Note: Always declare this structure with __kernel prefix
|
||||
*/
|
||||
struct k_mem_domain {
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
|
|
@ -94,28 +94,6 @@
|
|||
* their shell commands are automatically initialized by the kernel.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
/*
|
||||
* KERNELSPACE_OBJECT_FILES is a space-separated list of object files
|
||||
* and libraries that belong in kernelspace.
|
||||
*/
|
||||
#define MAYBE_EXCLUDE_SOME_FILES EXCLUDE_FILE (KERNELSPACE_OBJECT_FILES)
|
||||
#else
|
||||
#define MAYBE_EXCLUDE_SOME_FILES
|
||||
#endif /* CONFIG_APPLICATION_MEMORY */
|
||||
|
||||
/*
|
||||
* APP_INPUT_SECTION should be invoked on sections that should be in
|
||||
* 'app' space. KERNEL_INPUT_SECTION should be invoked on sections
|
||||
* that should be in 'kernel' space.
|
||||
*
|
||||
* NB: APP_INPUT_SECTION must be invoked before
|
||||
* KERNEL_INPUT_SECTION. If it is not all sections will end up in
|
||||
* kernelspace.
|
||||
*/
|
||||
#define APP_INPUT_SECTION(sect) *(MAYBE_EXCLUDE_SOME_FILES sect)
|
||||
#define KERNEL_INPUT_SECTION(sect) *(sect)
|
||||
|
||||
#define APP_SMEM_SECTION() KEEP(*(SORT("data_smem_*")))
|
||||
|
||||
#ifdef CONFIG_X86 /* LINKER FILES: defines used by linker script */
|
||||
|
@ -169,24 +147,11 @@ extern char _app_smem_size[];
|
|||
extern char _app_smem_rom_start[];
|
||||
extern char _app_smem_num_words[];
|
||||
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
/* Memory owned by the application. Start and end will be aligned for memory
|
||||
* management/protection hardware for the target architecture.
|
||||
|
||||
* The policy for this memory will be to configure all of it as user thread
|
||||
* accessible. It consists of all non-kernel globals.
|
||||
*/
|
||||
extern char __app_ram_start[];
|
||||
extern char __app_ram_end[];
|
||||
extern char __app_ram_size[];
|
||||
#endif
|
||||
|
||||
/* Memory owned by the kernel. Start and end will be aligned for memory
|
||||
* management/protection hardware for the target architecture.
|
||||
*
|
||||
* Consists of all kernel-side globals, all kernel objects, all thread stacks,
|
||||
* and all currently unused RAM. If CONFIG_APPLICATION_MEMORY is not enabled,
|
||||
* has all globals, not just kernel side.
|
||||
* and all currently unused RAM.
|
||||
*
|
||||
* Except for the stack of the currently executing thread, none of this memory
|
||||
* is normally accessible to user threads unless specifically granted at
|
||||
|
@ -199,21 +164,12 @@ extern char __kernel_ram_size[];
|
|||
/* Used by _bss_zero or arch-specific implementation */
|
||||
extern char __bss_start[];
|
||||
extern char __bss_end[];
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
extern char __app_bss_start[];
|
||||
extern char __app_bss_end[];
|
||||
#endif
|
||||
|
||||
/* Used by _data_copy() or arch-specific implementation */
|
||||
#ifdef CONFIG_XIP
|
||||
extern char __data_rom_start[];
|
||||
extern char __data_ram_start[];
|
||||
extern char __data_ram_end[];
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
extern char __app_data_rom_start[];
|
||||
extern char __app_data_ram_start[];
|
||||
extern char __app_data_ram_end[];
|
||||
#endif /* CONFIG_APPLICATION_MEMORY */
|
||||
#endif /* CONFIG_XIP */
|
||||
|
||||
/* Includes text and rodata */
|
||||
|
|
|
@@ -115,16 +115,6 @@ do { \

#define __in_section_unique(seg) ___in_section(seg, __FILE__, __COUNTER__)

#ifdef CONFIG_APPLICATION_MEMORY
#define __kernel __in_section_unique(kernel)
#define __kernel_noinit __in_section_unique(kernel_noinit)
#define __kernel_bss __in_section_unique(kernel_bss)
#else
#define __kernel
#define __kernel_noinit __noinit
#define __kernel_bss
#endif

#ifndef __packed
#define __packed __attribute__((__packed__))
#endif
@@ -207,14 +207,6 @@ config ERRNO
symbol. The C library must access the per-thread errno via the
_get_errno() symbol.

config APPLICATION_MEMORY
bool "Split kernel and application memory"
help
For all read-write memory sections (namely bss, noinit, data),
separate them into application and kernel areas. The application area
will have the project-level application objects and any libraries
including the C library in it.

choice SCHED_ALGORITHM
prompt "Scheduler priority queue algorithm"
default SCHED_DUMB
@@ -153,10 +153,6 @@ void _bss_zero(void)

bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_APPLICATION_MEMORY
(void)memset(&__app_bss_start, 0,
((u32_t) &__app_bss_end - (u32_t) &__app_bss_start));
#endif
#ifdef CONFIG_COVERAGE_GCOV
(void)memset(&__gcov_bss_start, 0,
((u32_t) &__gcov_bss_end - (u32_t) &__gcov_bss_start));

@@ -190,10 +186,6 @@ void _data_copy(void)
(void)memcpy(&_app_smem_start, &_app_smem_rom_start,
((u32_t) &_app_smem_end - (u32_t) &_app_smem_start));
#endif
#ifdef CONFIG_APPLICATION_MEMORY
(void)memcpy(&__app_data_ram_start, &__app_data_rom_start,
((u32_t) &__app_data_ram_end - (u32_t) &__app_data_ram_start));
#endif
}
#endif
@ -37,7 +37,7 @@ K_APPMEM_PARTITION_DEFINE(z_malloc_partition);
|
|||
|
||||
#if CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
|
||||
/* Compiler will throw an error if the provided value isn't a power of two */
|
||||
MALLOC_BSS static unsigned char __kernel __aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
|
||||
MALLOC_BSS static unsigned char __aligned(CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE)
|
||||
heap_base[CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE];
|
||||
#define MAX_HEAP_SIZE CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE
|
||||
#else
|
||||
|
|
|
@ -33,7 +33,7 @@ struct fd_entry {
|
|||
static const struct fd_op_vtable stdinout_fd_op_vtable;
|
||||
#endif
|
||||
|
||||
__kernel static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = {
|
||||
static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = {
|
||||
#ifdef CONFIG_POSIX_API
|
||||
/*
|
||||
* Predefine entries for stdin/stdout/stderr. Object pointer
|
||||
|
|
|
@ -23,7 +23,7 @@ K_THREAD_STACK_ARRAY_DEFINE(app_stack, 3, STACKSIZE);
|
|||
|
||||
struct k_thread app_thread_id[3];
|
||||
|
||||
__kernel struct k_mem_domain app_domain[2];
|
||||
struct k_mem_domain app_domain[2];
|
||||
|
||||
/* the start address of the MPU region needs to align with its size */
|
||||
#ifdef CONFIG_ARM
|
||||
|
|
|
@ -143,7 +143,7 @@
|
|||
#define obj_init_type "static"
|
||||
#else
|
||||
#define obj_init_type "dynamic"
|
||||
fork_obj_t __kernel fork_objs[NUM_PHIL];
|
||||
fork_obj_t fork_objs[NUM_PHIL];
|
||||
#endif
|
||||
|
||||
static fork_t forks[NUM_PHIL] = {
|
||||
|
@ -157,6 +157,6 @@ static fork_t forks[NUM_PHIL] = {
|
|||
};
|
||||
|
||||
static K_THREAD_STACK_ARRAY_DEFINE(stacks, NUM_PHIL, STACK_SIZE);
|
||||
static struct k_thread __kernel threads[NUM_PHIL];
|
||||
static struct k_thread threads[NUM_PHIL];
|
||||
|
||||
#endif /* phil_obj_abstract__h */
|
||||
|
|
|
@ -68,13 +68,13 @@ _app_enc_b BYTE W3R[26];
|
|||
*/
|
||||
K_SEM_DEFINE(allforone, 0, 3);
|
||||
|
||||
__kernel struct k_thread enc_thread;
|
||||
struct k_thread enc_thread;
|
||||
K_THREAD_STACK_DEFINE(enc_stack, STACKSIZE);
|
||||
|
||||
__kernel struct k_thread pt_thread;
|
||||
struct k_thread pt_thread;
|
||||
K_THREAD_STACK_DEFINE(pt_stack, STACKSIZE);
|
||||
|
||||
__kernel struct k_thread ct_thread;
|
||||
struct k_thread ct_thread;
|
||||
K_THREAD_STACK_DEFINE(ct_stack, STACKSIZE);
|
||||
|
||||
_app_enc_d char encMSG[] = "ENC!\n";
|
||||
|
|
|
@ -343,7 +343,6 @@ our $Ident = qr{
|
|||
our $Storage = qr{extern|static|asmlinkage};
|
||||
our $Sparse = qr{
|
||||
__user|
|
||||
__kernel|
|
||||
__force|
|
||||
__iomem|
|
||||
__must_check|
|
||||
|
|
|
@ -1,79 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (c) 2017 Linaro Limited
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
import pprint
|
||||
import os
|
||||
import struct
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
import elftools
|
||||
from elftools.elf.elffile import ELFFile
|
||||
from elftools.dwarf import descriptions
|
||||
from elftools.elf.sections import SymbolTableSection
|
||||
|
||||
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
|
||||
sys.stderr.write("pyelftools is out of date, need version 0.24 or later\n")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_symbols(obj):
|
||||
for section in obj.iter_sections():
|
||||
if isinstance(section, SymbolTableSection):
|
||||
return {sym.name: sym.entry.st_value
|
||||
for sym in section.iter_symbols()}
|
||||
|
||||
raise LookupError("Could not find symbol table")
|
||||
|
||||
|
||||
def parse_args():
|
||||
global args
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter)
|
||||
|
||||
parser.add_argument(
|
||||
"-k", "--kernel", required=True,
|
||||
help="Input zephyr ELF binary")
|
||||
parser.add_argument(
|
||||
"-o", "--output", required=True,
|
||||
help="Output linker file")
|
||||
parser.add_argument(
|
||||
"-v", "--verbose", action="store_true",
|
||||
help="Print extra debugging information")
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
parse_args()
|
||||
|
||||
bit_len = None
|
||||
|
||||
with open(args.kernel, "rb") as fp:
|
||||
elf = ELFFile(fp)
|
||||
args.little_endian = elf.little_endian
|
||||
syms = get_symbols(elf)
|
||||
|
||||
app_ram_size = syms['__app_last_address_used'] - \
|
||||
syms['__app_ram_start']
|
||||
bit_len = app_ram_size.bit_length()
|
||||
|
||||
if bit_len:
|
||||
align_size = 1 << bit_len
|
||||
else:
|
||||
align_size = 32
|
||||
|
||||
with open(args.output, "w") as fp:
|
||||
fp.write("/***********************************************\n")
|
||||
fp.write(" * Generated file, do not modify\n")
|
||||
fp.write(" **********************************************/\n")
|
||||
fp.write("_app_data_align = " + str(align_size) + ";\n")
|
||||
fp.write(". = ALIGN(_app_data_align);\n")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -25,7 +25,7 @@ extern u64_t __end_drop_to_usermode_time;
|
|||
u32_t drop_to_user_mode_end_time, drop_to_user_mode_start_time;
|
||||
u32_t user_thread_creation_end_time, user_thread_creation_start_time;
|
||||
|
||||
__kernel struct k_thread my_thread_user;
|
||||
struct k_thread my_thread_user;
|
||||
K_THREAD_STACK_EXTERN(my_stack_area);
|
||||
K_THREAD_STACK_EXTERN(my_stack_area_0);
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ K_SEM_DEFINE(inherit_sem, SEMAPHORE_INIT_COUNT, SEMAPHORE_MAX_COUNT);
|
|||
K_MUTEX_DEFINE(inherit_mutex);
|
||||
K_TIMER_DEFINE(inherit_timer, dummy_start, dummy_end);
|
||||
K_MSGQ_DEFINE(inherit_msgq, MSG_Q_SIZE, MSG_Q_MAX_NUM_MSGS, MSG_Q_ALIGN);
|
||||
__kernel struct k_thread test_1_tid;
|
||||
struct k_thread test_1_tid;
|
||||
|
||||
u8_t MEM_DOMAIN_ALIGNMENT inherit_buf[MEM_REGION_ALLOC]; /* for mem domain */
|
||||
|
||||
|
@ -36,7 +36,7 @@ struct k_mem_partition *inherit_memory_partition_array[] = {
|
|||
&ztest_mem_partition
|
||||
};
|
||||
|
||||
__kernel struct k_mem_domain inherit_mem_domain;
|
||||
struct k_mem_domain inherit_mem_domain;
|
||||
|
||||
/* generic function to do check the access permissions. */
|
||||
void access_test(void)
|
||||
|
|
|
@ -16,25 +16,25 @@ K_THREAD_STACK_DEFINE(kobject_stack_4, KOBJECT_STACK_SIZE);
|
|||
K_SEM_DEFINE(kobject_sem, SEMAPHORE_INIT_COUNT, SEMAPHORE_MAX_COUNT);
|
||||
K_SEM_DEFINE(kobject_public_sem, SEMAPHORE_INIT_COUNT, SEMAPHORE_MAX_COUNT);
|
||||
K_MUTEX_DEFINE(kobject_mutex);
|
||||
__kernel struct k_thread kobject_test_4_tid;
|
||||
__kernel struct k_thread kobject_test_6_tid;
|
||||
__kernel struct k_thread kobject_test_7_tid;
|
||||
struct k_thread kobject_test_4_tid;
|
||||
struct k_thread kobject_test_6_tid;
|
||||
struct k_thread kobject_test_7_tid;
|
||||
|
||||
__kernel struct k_thread kobject_test_9_tid;
|
||||
__kernel struct k_thread kobject_test_13_tid;
|
||||
__kernel struct k_thread kobject_test_14_tid;
|
||||
struct k_thread kobject_test_9_tid;
|
||||
struct k_thread kobject_test_13_tid;
|
||||
struct k_thread kobject_test_14_tid;
|
||||
|
||||
__kernel struct k_thread kobject_test_reuse_1_tid, kobject_test_reuse_2_tid;
|
||||
__kernel struct k_thread kobject_test_reuse_3_tid, kobject_test_reuse_4_tid;
|
||||
__kernel struct k_thread kobject_test_reuse_5_tid, kobject_test_reuse_6_tid;
|
||||
__kernel struct k_thread kobject_test_reuse_7_tid, kobject_test_reuse_8_tid;
|
||||
struct k_thread kobject_test_reuse_1_tid, kobject_test_reuse_2_tid;
|
||||
struct k_thread kobject_test_reuse_3_tid, kobject_test_reuse_4_tid;
|
||||
struct k_thread kobject_test_reuse_5_tid, kobject_test_reuse_6_tid;
|
||||
struct k_thread kobject_test_reuse_7_tid, kobject_test_reuse_8_tid;
|
||||
|
||||
struct k_thread kobject_test_10_tid_uninitialized;
|
||||
|
||||
struct k_sem *random_sem_type;
|
||||
struct k_sem kobject_sem_not_hash_table;
|
||||
__kernel struct k_sem kobject_sem_no_init_no_access;
|
||||
__kernel struct k_sem kobject_sem_no_init_access;
|
||||
struct k_sem kobject_sem_no_init_no_access;
|
||||
struct k_sem kobject_sem_no_init_access;
|
||||
|
||||
|
||||
/****************************************************************************/
|
||||
|
|
|
@ -13,22 +13,22 @@
|
|||
K_THREAD_STACK_DEFINE(mem_domain_1_stack, MEM_DOMAIN_STACK_SIZE);
|
||||
K_THREAD_STACK_DEFINE(mem_domain_2_stack, MEM_DOMAIN_STACK_SIZE);
|
||||
K_THREAD_STACK_DEFINE(mem_domain_6_stack, MEM_DOMAIN_STACK_SIZE);
|
||||
__kernel struct k_thread mem_domain_1_tid, mem_domain_2_tid, mem_domain_6_tid;
|
||||
struct k_thread mem_domain_1_tid, mem_domain_2_tid, mem_domain_6_tid;
|
||||
|
||||
/****************************************************************************/
|
||||
/* The mem domains needed.*/
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_buf[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_buf1[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_buf[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_buf1[MEM_REGION_ALLOC];
|
||||
|
||||
/* partitions added later in the test cases.*/
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part1[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part2[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part3[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part4[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part5[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part6[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part7[MEM_REGION_ALLOC];
|
||||
__kernel u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part8[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part1[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part2[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part3[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part4[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part5[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part6[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part7[MEM_REGION_ALLOC];
|
||||
u8_t MEM_DOMAIN_ALIGNMENT mem_domain_tc3_part8[MEM_REGION_ALLOC];
|
||||
|
||||
K_MEM_PARTITION_DEFINE(mem_domain_memory_partition,
|
||||
mem_domain_buf,
|
||||
|
@ -59,8 +59,8 @@ struct k_mem_partition *mem_domain_memory_partition_array1[] = {
|
|||
&mem_domain_memory_partition1,
|
||||
&ztest_mem_partition
|
||||
};
|
||||
__kernel struct k_mem_domain mem_domain_mem_domain;
|
||||
__kernel struct k_mem_domain mem_domain1;
|
||||
struct k_mem_domain mem_domain_mem_domain;
|
||||
struct k_mem_domain mem_domain1;
|
||||
|
||||
/****************************************************************************/
|
||||
/* Common init functions */
|
||||
|
@ -321,7 +321,7 @@ struct k_mem_partition *mem_domain_tc3_partition_array[] = {
|
|||
&mem_domain_tc3_part8_struct
|
||||
};
|
||||
|
||||
__kernel struct k_mem_domain mem_domain_tc3_mem_domain;
|
||||
struct k_mem_domain mem_domain_tc3_mem_domain;
|
||||
|
||||
void mem_domain_for_user_tc3(void *max_partitions, void *p2, void *p3)
|
||||
{
|
||||
|
|
|
@ -15,12 +15,12 @@
|
|||
*/
|
||||
extern struct k_sem sem1;
|
||||
|
||||
static __kernel struct k_sem semarray[SEM_ARRAY_SIZE];
|
||||
static struct k_sem semarray[SEM_ARRAY_SIZE];
|
||||
static struct k_sem *dyn_sem[SEM_ARRAY_SIZE];
|
||||
|
||||
K_SEM_DEFINE(sem1, 0, 1);
|
||||
static __kernel struct k_sem sem2;
|
||||
static __kernel char bad_sem[sizeof(struct k_sem)];
|
||||
static struct k_sem sem2;
|
||||
static char bad_sem[sizeof(struct k_sem)];
|
||||
static struct k_sem sem3;
|
||||
|
||||
static int test_object(struct k_sem *sem, int retval)
|
||||
|
@ -82,11 +82,7 @@ void test_generic_object(void)
|
|||
zassert_false(test_object(&stack_sem, -EBADF), NULL);
|
||||
zassert_false(test_object((struct k_sem *)&bad_sem, -EBADF), NULL);
|
||||
zassert_false(test_object((struct k_sem *)0xFFFFFFFF, -EBADF), NULL);
|
||||
#ifdef CONFIG_APPLICATION_MEMORY
|
||||
zassert_false(test_object(&sem3, -EBADF), NULL);
|
||||
#else
|
||||
object_permission_checks(&sem3, false);
|
||||
#endif
|
||||
object_permission_checks(&sem1, true);
|
||||
object_permission_checks(&sem2, false);
|
||||
|
||||
|
|
|
@ -11,8 +11,8 @@
|
|||
|
||||
#define BUF_SIZE 32
|
||||
|
||||
__kernel char kernel_string[BUF_SIZE];
|
||||
__kernel char kernel_buf[BUF_SIZE];
|
||||
char kernel_string[BUF_SIZE];
|
||||
char kernel_buf[BUF_SIZE];
|
||||
ZTEST_BMEM char user_string[BUF_SIZE];
|
||||
|
||||
size_t _impl_string_nlen(char *src, size_t maxlen, int *err)
|
||||
|
|
|
@ -275,7 +275,7 @@ static void write_kerntext(void)
|
|||
zassert_unreachable("Write to kernel text did not fault");
|
||||
}
|
||||
|
||||
__kernel static int kernel_data;
|
||||
static int kernel_data;
|
||||
|
||||
/**
|
||||
* @brief Testto read from kernel data section
|
||||
|
@ -292,7 +292,7 @@ static void read_kernel_data(void)
|
|||
BARRIER();
|
||||
value = kernel_data;
|
||||
printk("%d\n", value);
|
||||
zassert_unreachable("Read from __kernel data did not fault");
|
||||
zassert_unreachable("Read from data did not fault");
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -306,7 +306,7 @@ static void write_kernel_data(void)
|
|||
expected_reason = REASON_HW_EXCEPTION;
|
||||
BARRIER();
|
||||
kernel_data = 1;
|
||||
zassert_unreachable("Write to __kernel data did not fault");
|
||||
zassert_unreachable("Write to data did not fault");
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -394,7 +394,7 @@ static void pass_user_object(void)
|
|||
zassert_unreachable("Pass a user object to a syscall did not fault");
|
||||
}
|
||||
|
||||
__kernel static struct k_sem ksem;
|
||||
static struct k_sem ksem;
|
||||
|
||||
/**
|
||||
* @brief Test to pass object to a system call without permissions
|
||||
|
@ -412,7 +412,7 @@ static void pass_noperms_object(void)
|
|||
"syscall did not fault");
|
||||
}
|
||||
|
||||
__kernel struct k_thread kthread_thread;
|
||||
struct k_thread kthread_thread;
|
||||
|
||||
K_THREAD_STACK_DEFINE(kthread_stack, STACKSIZE);
|
||||
|
||||
|
@ -438,7 +438,7 @@ static void start_kernel_thread(void)
|
|||
zassert_unreachable("Create a kernel thread did not fault");
|
||||
}
|
||||
|
||||
__kernel struct k_thread uthread_thread;
|
||||
struct k_thread uthread_thread;
|
||||
K_THREAD_STACK_DEFINE(uthread_stack, STACKSIZE);
|
||||
|
||||
static void uthread_body(void)
|
||||
|
@ -626,13 +626,6 @@ static void read_kobject_user_pipe(void)
|
|||
"did not fault");
|
||||
}
|
||||
|
||||
/* Removed test for access_non_app_memory
|
||||
* due to the APPLICATION_MEMORY variable
|
||||
* defaulting to y, when enabled the
|
||||
* section app_bss is made available to
|
||||
* all threads breaking the test
|
||||
*/
|
||||
|
||||
/* Create bool in part1 partitions */
|
||||
K_APP_DMEM(part1) bool thread_bool;
|
||||
|
||||
|
|
|
@ -9,18 +9,18 @@
|
|||
/**TESTPOINT: init via K_MSGQ_DEFINE*/
|
||||
K_MSGQ_DEFINE(kmsgq, MSG_SIZE, MSGQ_LEN, 4);
|
||||
K_MSGQ_DEFINE(kmsgq_test_alloc, MSG_SIZE, MSGQ_LEN, 4);
|
||||
__kernel struct k_msgq msgq;
|
||||
__kernel struct k_msgq msgq1;
|
||||
struct k_msgq msgq;
|
||||
struct k_msgq msgq1;
|
||||
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
|
||||
K_THREAD_STACK_DEFINE(tstack1, STACK_SIZE);
|
||||
K_THREAD_STACK_DEFINE(tstack2, STACK_SIZE);
|
||||
__kernel struct k_thread tdata;
|
||||
__kernel struct k_thread tdata1;
|
||||
__kernel struct k_thread tdata2;
|
||||
struct k_thread tdata;
|
||||
struct k_thread tdata1;
|
||||
struct k_thread tdata2;
|
||||
static ZTEST_BMEM char __aligned(4) tbuffer[MSG_SIZE * MSGQ_LEN];
|
||||
static ZTEST_DMEM char __aligned(4) tbuffer1[MSG_SIZE];
|
||||
static ZTEST_DMEM u32_t data[MSGQ_LEN] = { MSG0, MSG1 };
|
||||
__kernel struct k_sem end_sema;
|
||||
struct k_sem end_sema;
|
||||
|
||||
static void put_msgq(struct k_msgq *pmsgq)
|
||||
{
|
||||
|
|
|
@ -228,7 +228,7 @@ void thread_11(void)
|
|||
}
|
||||
|
||||
K_THREAD_STACK_DEFINE(thread_12_stack_area, STACKSIZE);
|
||||
__kernel struct k_thread thread_12_thread_data;
|
||||
struct k_thread thread_12_thread_data;
|
||||
extern void thread_12(void);
|
||||
|
||||
/**
|
||||
|
|
|
@ -114,7 +114,7 @@ static struct pipe_sequence timeout_elements[] = {
|
|||
{ PIPE_SIZE + 1, ATLEAST_1, 0, -EAGAIN }
|
||||
};
|
||||
|
||||
__kernel struct k_thread get_single_tid;
|
||||
struct k_thread get_single_tid;
|
||||
|
||||
/* Helper functions */
|
||||
|
||||
|
|
|
@ -18,14 +18,14 @@ K_PIPE_DEFINE(kpipe, PIPE_LEN, 4);
|
|||
K_PIPE_DEFINE(khalfpipe, (PIPE_LEN / 2), 4);
|
||||
K_PIPE_DEFINE(kpipe1, PIPE_LEN, 4);
|
||||
K_PIPE_DEFINE(pipe_test_alloc, PIPE_LEN, 4);
|
||||
__kernel struct k_pipe pipe;
|
||||
struct k_pipe pipe;
|
||||
|
||||
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
|
||||
K_THREAD_STACK_DEFINE(tstack1, STACK_SIZE);
|
||||
K_THREAD_STACK_DEFINE(tstack2, STACK_SIZE);
|
||||
__kernel struct k_thread tdata;
|
||||
__kernel struct k_thread tdata1;
|
||||
__kernel struct k_thread tdata2;
|
||||
struct k_thread tdata;
|
||||
struct k_thread tdata1;
|
||||
struct k_thread tdata2;
|
||||
K_SEM_DEFINE(end_sema, 0, 1);
|
||||
|
||||
/* By design, only two blocks. We should never need more than that, one
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
|
||||
static ZTEST_DMEM unsigned char __aligned(4) data[] = "abcd1234";
|
||||
|
||||
__kernel struct k_pipe put_get_pipe;
|
||||
struct k_pipe put_get_pipe;
|
||||
|
||||
|
||||
static void put_fail(struct k_pipe *p)
|
||||
|
|
|
@ -17,9 +17,9 @@ struct fifo_msg {
|
|||
#define FIFO_MSG_VALUE 0xdeadbeef
|
||||
|
||||
/* verify k_poll() without waiting */
|
||||
static __kernel struct k_sem no_wait_sem;
|
||||
static __kernel struct k_fifo no_wait_fifo;
|
||||
static __kernel struct k_poll_signal no_wait_signal;
|
||||
static struct k_sem no_wait_sem;
|
||||
static struct k_fifo no_wait_fifo;
|
||||
static struct k_poll_signal no_wait_signal;
|
||||
|
||||
/**
|
||||
* @brief Test cases to verify poll
|
||||
|
@ -108,12 +108,12 @@ void test_poll_no_wait(void)
|
|||
|
||||
static K_SEM_DEFINE(wait_sem, 0, 1);
|
||||
static K_FIFO_DEFINE(wait_fifo);
|
||||
static __kernel struct k_poll_signal wait_signal =
|
||||
static struct k_poll_signal wait_signal =
|
||||
K_POLL_SIGNAL_INITIALIZER(wait_signal);
|
||||
|
||||
struct fifo_msg wait_msg = { NULL, FIFO_MSG_VALUE };
|
||||
|
||||
static __kernel struct k_thread poll_wait_helper_thread;
|
||||
static struct k_thread poll_wait_helper_thread;
|
||||
static K_THREAD_STACK_DEFINE(poll_wait_helper_stack, KB(1));
|
||||
|
||||
#define TAG_0 10
|
||||
|
@@ -334,10 +334,10 @@ void test_poll_wait(void)
 
 /* verify k_poll() that waits on object which gets cancellation */
 
-static __kernel struct k_fifo cancel_fifo;
-static __kernel struct k_fifo non_cancel_fifo;
+static struct k_fifo cancel_fifo;
+static struct k_fifo non_cancel_fifo;
 
-static __kernel struct k_thread poll_cancel_helper_thread;
+static struct k_thread poll_cancel_helper_thread;
 static K_THREAD_STACK_DEFINE(poll_cancel_helper_stack, 768);
 
 static void poll_cancel_helper(void *p1, void *p2, void *p3)
@@ -428,7 +428,7 @@ void test_poll_cancel_main_high_prio(void)
 /* verify multiple pollers */
 static K_SEM_DEFINE(multi_sem, 0, 1);
 
-static __kernel struct k_thread multi_thread_lowprio;
+static struct k_thread multi_thread_lowprio;
 static K_THREAD_STACK_DEFINE(multi_stack_lowprio, KB(1));
 
 static void multi_lowprio(void *p1, void *p2, void *p3)
@@ -448,7 +448,7 @@ static void multi_lowprio(void *p1, void *p2, void *p3)
 
 static K_SEM_DEFINE(multi_reply, 0, 1);
 
-static __kernel struct k_thread multi_thread;
+static struct k_thread multi_thread;
 static K_THREAD_STACK_DEFINE(multi_stack, KB(1));
 
 static void multi(void *p1, void *p2, void *p3)
@@ -525,9 +525,9 @@ void test_poll_multi(void)
 	k_sleep(250);
 }
 
-static __kernel struct k_thread signal_thread;
+static struct k_thread signal_thread;
 static K_THREAD_STACK_DEFINE(signal_stack, KB(1));
-static __kernel struct k_poll_signal signal;
+static struct k_poll_signal signal;
 
 static void threadstate(void *p1, void *p2, void *p3)
 {
@@ -12,7 +12,7 @@
 #define LIST_LEN 5
 
 static K_THREAD_STACK_DEFINE(child_stack, STACK_SIZE);
-static __kernel struct k_thread child_thread;
+static struct k_thread child_thread;
 static ZTEST_BMEM struct qdata qdata[LIST_LEN * 2];
 
 K_MEM_POOL_DEFINE(test_pool, 16, 96, 4, 4);
@@ -13,9 +13,9 @@
 #define SEM_LIMIT 2
 /**TESTPOINT: init via K_SEM_DEFINE*/
 K_SEM_DEFINE(ksema, SEM_INITIAL, SEM_LIMIT);
-__kernel struct k_sem sema;
+struct k_sem sema;
 static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
-__kernel struct k_thread tdata;
+struct k_thread tdata;
 
 /*entry of contexts*/
 static void tisr_entry(void *p)
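For context, the two semaphore declarations in this hunk differ only in when initialization happens: K_SEM_DEFINE() produces a build-time-initialized object, while the now-plain struct k_sem must be initialized at runtime before first use. A small sketch under that assumption, with illustrative demo_* names and values:

#include <zephyr.h>

#define DEMO_SEM_INITIAL 0
#define DEMO_SEM_LIMIT   2

/* Static form: ready to use at boot */
K_SEM_DEFINE(demo_ksema, DEMO_SEM_INITIAL, DEMO_SEM_LIMIT);

/* Runtime form: initialize before any give/take */
static struct k_sem demo_sema;

void demo_sem_setup(void)
{
	k_sem_init(&demo_sema, DEMO_SEM_INITIAL, DEMO_SEM_LIMIT);
}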
@@ -38,8 +38,8 @@ K_PIPE_DEFINE(timeout_info_pipe,
 	      sizeof(struct timeout_info) * TOTAL_THREADS_WAITING, 4);
 
 
-__kernel struct k_thread sem_tid, sem_tid_1, sem_tid_2;
-__kernel struct k_thread multiple_tid[TOTAL_THREADS_WAITING];
+struct k_thread sem_tid, sem_tid_1, sem_tid_2;
+struct k_thread multiple_tid[TOTAL_THREADS_WAITING];
 
 /******************************************************************************/
 /* Helper functions */
@@ -12,12 +12,12 @@
 /**TESTPOINT: init via K_STACK_DEFINE*/
 K_STACK_DEFINE(kstack, STACK_LEN);
 K_STACK_DEFINE(kstack_test_alloc, STACK_LEN);
-__kernel struct k_stack stack;
+struct k_stack stack;
 
 K_THREAD_STACK_DEFINE(threadstack, STACK_SIZE);
-__kernel struct k_thread thread_data;
+struct k_thread thread_data;
 static ZTEST_DMEM u32_t data[STACK_LEN] = { 0xABCD, 0x1234 };
-__kernel struct k_sem end_sema;
+struct k_sem end_sema;
 
 static void tstack_push(struct k_stack *pstack)
 {
@@ -44,7 +44,7 @@ K_STACK_DEFINE(stack2, STACK_LEN);
 
 /* thread info * */
 K_THREAD_STACK_DEFINE(threadstack, TSTACK_SIZE);
-__kernel struct k_thread thread_data;
+struct k_thread thread_data;
 
 /* Data pushed to stack */
 static ZTEST_DMEM u32_t data1[STACK_LEN] = { 0xAAAA, 0xBBBB, 0xCCCC, 0xDDDD };
@@ -53,7 +53,7 @@ static ZTEST_DMEM u32_t data_isr[STACK_LEN] = { 0xABCD, 0xABCD, 0xABCD,
 	0xABCD };
 
 /* semaphore to sync threads */
-static __kernel struct k_sem end_sema;
+static struct k_sem end_sema;
 
 /* entry of contexts */
 static void tIsr_entry_push(void *p)
@@ -36,7 +36,7 @@ extern void test_delayed_thread_abort(void);
 extern void test_k_thread_foreach(void);
 extern void test_threads_cpu_mask(void);
 
-__kernel struct k_thread tdata;
+struct k_thread tdata;
 #define STACK_SIZE (256 + CONFIG_TEST_EXTRA_STACKSIZE)
 K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
 size_t tstack_size = K_THREAD_STACK_SIZEOF(tstack);
@@ -44,8 +44,8 @@ size_t tstack_size = K_THREAD_STACK_SIZEOF(tstack);
 /*local variables*/
 static K_THREAD_STACK_DEFINE(tstack_custom, STACK_SIZE);
 static K_THREAD_STACK_DEFINE(tstack_name, STACK_SIZE);
-__kernel static struct k_thread tdata_custom;
-__kernel static struct k_thread tdata_name;
+static struct k_thread tdata_custom;
+static struct k_thread tdata_name;
 
 static int main_prio;
 
@@ -8,7 +8,7 @@
 #include <kernel_structs.h>
 #include <kernel_internal.h>
 
-__kernel struct k_thread kthread_thread;
+struct k_thread kthread_thread;
 
 #define STACKSIZE 1024
 K_THREAD_STACK_DEFINE(kthread_stack, STACKSIZE);
@@ -42,8 +42,8 @@ K_THREAD_ACCESS_GRANT(T_KDEFINE_PREEMPT_THREAD, &start_sema, &end_sema);
 /*local variables*/
 static K_THREAD_STACK_DEFINE(stack_coop, INIT_COOP_STACK_SIZE);
 static K_THREAD_STACK_DEFINE(stack_preempt, INIT_PREEMPT_STACK_SIZE);
-__kernel static struct k_thread thread_coop;
-__kernel static struct k_thread thread_preempt;
+static struct k_thread thread_coop;
+static struct k_thread thread_preempt;
 static ZTEST_BMEM u64_t t_create;
 static ZTEST_BMEM struct thread_data {
 	int init_prio;
@@ -21,13 +21,13 @@
 
 static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
 static K_THREAD_STACK_DEFINE(user_tstack, STACK_SIZE);
-__kernel static struct k_work_q workq;
-__kernel static struct k_work_q user_workq;
+static struct k_work_q workq;
+static struct k_work_q user_workq;
 static ZTEST_BMEM struct k_work work[NUM_OF_WORK];
 static struct k_delayed_work new_work;
 static struct k_delayed_work delayed_work[NUM_OF_WORK], delayed_work_sleepy;
-__kernel static struct k_sem sync_sema;
-__kernel static struct k_sem dummy_sema;
+static struct k_sem sync_sema;
+static struct k_sem dummy_sema;
 static struct k_thread *main_thread;
 
 static void work_sleepy(struct k_work *w)
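As a reminder of how a now-plain work queue object like workq above gets going at runtime, here is a hedged sketch using the k_work_q_start()/k_work_init()/k_work_submit_to_queue() APIs of this era; the demo_* names and stack/priority values are illustrative, not taken from this diff:

#include <zephyr.h>

static struct k_work_q demo_workq;
static K_THREAD_STACK_DEFINE(demo_workq_stack, 1024);
static struct k_work demo_work;
static int demo_counter;

static void demo_work_handler(struct k_work *work)
{
	demo_counter++;	/* runs in the demo_workq thread context */
}

void demo_workq_setup(void)
{
	/* Spin up the queue's thread, then hand it a work item */
	k_work_q_start(&demo_workq, demo_workq_stack,
		       K_THREAD_STACK_SIZEOF(demo_workq_stack),
		       K_PRIO_PREEMPT(10));

	k_work_init(&demo_work, demo_work_handler);
	k_work_submit_to_queue(&demo_workq, &demo_work);
}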
@@ -3,7 +3,6 @@ CONFIG_ZTEST_STACKSIZE=2048
 CONFIG_MAIN_STACK_SIZE=1024
 CONFIG_HEAP_MEM_POOL_SIZE=1024
 CONFIG_TEST_USERSPACE=n
-CONFIG_APPLICATION_MEMORY=n
 
 CONFIG_FLASH=y
 CONFIG_BT=n
@@ -12,7 +12,7 @@
 #endif
 
 #ifdef KERNEL
-__kernel static struct k_thread ztest_thread;
+static struct k_thread ztest_thread;
 #endif
 
 /* ZTEST_DMEM and ZTEST_BMEM are used for the application shared memory test */
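The ZTEST_DMEM and ZTEST_BMEM tags mentioned in the comment above are what the converted tests use in place of application memory: they place test globals in the ztest memory partition so a user-mode test thread can access them when app shared memory is enabled, and degrade to ordinary globals otherwise. A minimal sketch, with an illustrative test body and names:

#include <ztest.h>

static ZTEST_DMEM int expected = 42;	/* initialized data, shared with user mode */
static ZTEST_BMEM int observed;		/* zero-initialized, shared with user mode */

static void test_shared_globals(void)
{
	observed = expected;
	zassert_equal(observed, 42, "shared test data not visible");
}

Such a case would typically be registered from the suite's test_main() with ztest_unit_test() or, for a user-mode run, ztest_user_unit_test().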
@@ -158,7 +158,7 @@ K_THREAD_STACK_DEFINE(ztest_thread_stack, CONFIG_ZTEST_STACKSIZE +
 		      CONFIG_TEST_EXTRA_STACKSIZE);
 static ZTEST_BMEM int test_result;
 
-__kernel static struct k_sem test_end_signal;
+static struct k_sem test_end_signal;
 
 void ztest_test_fail(void)
 {