678b76e4b0
Zeroing the BSS and copying data to RAM with regular memset/memcpy may
cause problems when those functions are assuming a fully initialized
system for their optimizations to work e.g. some instructions require
an active MMU, but turning the MMU on needs the .bss section to be
cleared first, etc.
Commit c5b898743a
("aarch64: Fix alignment fault on z_bss_zero()")
provides a detailed explanation of such a case.
Replacing z_bss_zero() with an architecture-specific one is problematic,
as the former may see new sections added to it that would be missed by
the latter. The same reasoning goes for z_data_copy().
Let's make maintenance much easier by providing weak versions of
memset/memcpy that can be overridden by architecture-specific safe
versions when needed.
Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
73 lines
2.2 KiB
C
73 lines
2.2 KiB
C
/*
|
|
* Copyright (c) 2010-2014 Wind River Systems, Inc.
|
|
* Copyright (c) 2020 Intel Corporation
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
*/
|
|
|
|
|
|
#include <zephyr.h>
|
|
#include <kernel.h>
|
|
#include <kernel_internal.h>
|
|
#include <linker/linker-defs.h>
|
|
|
|
#ifdef CONFIG_STACK_CANARIES
|
|
extern volatile uintptr_t __stack_chk_guard;
|
|
#endif /* CONFIG_STACK_CANARIES */
|
|
|
|
/**
|
|
* @brief Copy the data section from ROM to RAM
|
|
*
|
|
* This routine copies the data section from ROM to RAM.
|
|
*/
|
|
void z_data_copy(void)
|
|
{
|
|
z_early_memcpy(&__data_region_start, &__data_region_load_start,
|
|
__data_region_end - __data_region_start);
|
|
#ifdef CONFIG_ARCH_HAS_RAMFUNC_SUPPORT
|
|
z_early_memcpy(&__ramfunc_start, &__ramfunc_load_start,
|
|
(uintptr_t) &__ramfunc_size);
|
|
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
|
|
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
|
|
z_early_memcpy(&__ccm_data_start, &__ccm_data_rom_start,
|
|
__ccm_data_end - __ccm_data_start);
|
|
#endif
|
|
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_itcm), okay)
|
|
z_early_memcpy(&__itcm_start, &__itcm_load_start,
|
|
(uintptr_t) &__itcm_size);
|
|
#endif
|
|
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
|
|
z_early_memcpy(&__dtcm_data_start, &__dtcm_data_load_start,
|
|
__dtcm_data_end - __dtcm_data_start);
|
|
#endif
|
|
#ifdef CONFIG_CODE_DATA_RELOCATION
|
|
extern void data_copy_xip_relocation(void);
|
|
|
|
data_copy_xip_relocation();
|
|
#endif /* CONFIG_CODE_DATA_RELOCATION */
|
|
#ifdef CONFIG_USERSPACE
|
|
#ifdef CONFIG_STACK_CANARIES
|
|
/* stack canary checking is active for all C functions.
|
|
* __stack_chk_guard is some uninitialized value living in the
|
|
* app shared memory sections. Preserve it, and don't make any
|
|
* function calls to perform the memory copy. The true canary
|
|
* value gets set later in z_cstart().
|
|
*/
|
|
uintptr_t guard_copy = __stack_chk_guard;
|
|
uint8_t *src = (uint8_t *)&_app_smem_rom_start;
|
|
uint8_t *dst = (uint8_t *)&_app_smem_start;
|
|
uint32_t count = _app_smem_end - _app_smem_start;
|
|
|
|
guard_copy = __stack_chk_guard;
|
|
while (count > 0) {
|
|
*(dst++) = *(src++);
|
|
count--;
|
|
}
|
|
__stack_chk_guard = guard_copy;
|
|
#else
|
|
z_early_memcpy(&_app_smem_start, &_app_smem_rom_start,
|
|
_app_smem_end - _app_smem_start);
|
|
#endif /* CONFIG_STACK_CANARIES */
|
|
#endif /* CONFIG_USERSPACE */
|
|
}
|