/*
 * Copyright (c) 2017 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <init.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <misc/__assert.h>
#include <stdbool.h>
#include <spinlock.h>
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
/* Spinlock serializing every update to memory domain data structures
 * (domain partition tables and per-thread membership lists).
 */
static struct k_spinlock lock;

/* Maximum partitions per domain, as reported by the architecture layer
 * at PRE_KERNEL_1 init time; bounded by CONFIG_MAX_DOMAIN_PARTITIONS.
 */
static u8_t max_partitions;
|
|
|
|
|
2018-11-13 16:28:55 +01:00
|
|
|
#if (defined(CONFIG_EXECUTE_XOR_WRITE) || \
	defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)) && __ASSERT_ON
/**
 * Check that a candidate partition is sane with respect to an existing
 * set of partitions.
 *
 * A partition is rejected when it is both writable and executable
 * (W^X violation), when it overlaps any existing partition on MPUs that
 * forbid overlap, or when an overlap would combine write and execute
 * permissions across the two partitions.
 *
 * Only compiled when assertions are on and a relevant config is enabled;
 * otherwise the macro stubs below make these checks vanish.
 *
 * @param part      Candidate partition
 * @param parts     Existing partition array to compare against
 * @param num_parts Number of entries in @a parts
 * @return true if the candidate is acceptable, false otherwise
 */
static bool sane_partition(const struct k_mem_partition *part,
			   const struct k_mem_partition *parts,
			   u32_t num_parts)
{
	bool exec, write;
	u32_t last;
	u32_t i;

	/* Inclusive end address of the candidate partition */
	last = part->start + part->size - 1;
	exec = K_MEM_PARTITION_IS_EXECUTABLE(part->attr);
	write = K_MEM_PARTITION_IS_WRITABLE(part->attr);

	/* W^X: a single partition must never be writable AND executable */
	if (exec && write) {
		__ASSERT(false,
			 "partition is writable and executable <start %lx>",
			 part->start);
		return false;
	}

	for (i = 0U; i < num_parts; i++) {
		bool cur_write, cur_exec;
		u32_t cur_last;

		cur_last = parts[i].start + parts[i].size - 1;

		/* Skip existing partitions that do not overlap the
		 * candidate's [start, last] range.
		 */
		if (last < parts[i].start || cur_last < part->start) {
			continue;
		}
#if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
		/* Partitions overlap */
		__ASSERT(false, "overlapping partitions <%lx...%x>, <%lx...%x>",
			 part->start, last,
			 parts[i].start, cur_last);
		return false;
#endif

		cur_write = K_MEM_PARTITION_IS_WRITABLE(parts[i].attr);
		cur_exec = K_MEM_PARTITION_IS_EXECUTABLE(parts[i].attr);

		/* Overlapping regions must not combine write and execute
		 * permissions between them either.
		 */
		if ((cur_write && exec) || (cur_exec && write)) {
			__ASSERT(false, "overlapping partitions are "
				 "writable and executable "
				 "<%lx...%x>, <%lx...%x>",
				 part->start, last,
				 parts[i].start, cur_last);
			return false;
		}
	}

	return true;
}

/**
 * Convenience wrapper: check a candidate partition against every
 * partition already present in @a domain.
 */
static inline bool sane_partition_domain(const struct k_mem_domain *domain,
					 const struct k_mem_partition *part)
{
	return sane_partition(part, domain->partitions,
			      domain->num_partitions);
}
#else
/* Checks compiled out: always report the partition as sane */
#define sane_partition(...) (true)
#define sane_partition_domain(...) (true)
#endif
|
2017-10-20 20:27:37 +02:00
|
|
|
|
2018-02-28 23:22:57 +01:00
|
|
|
/**
 * Initialize a memory domain with an optional initial set of partitions.
 *
 * The domain's partition table is zeroed, each supplied partition is
 * validated and copied in, and the thread membership list is initialized
 * empty. All updates happen under the module spinlock.
 *
 * @param domain    Domain to initialize (must be non-NULL)
 * @param num_parts Number of entries in @a parts; must not exceed
 *                  max_partitions
 * @param parts     Array of partition pointers; may be NULL only when
 *                  @a num_parts is zero
 */
void k_mem_domain_init(struct k_mem_domain *domain, u8_t num_parts,
		       struct k_mem_partition *parts[])
{
	k_spinlock_key_t sync_key;
	u32_t idx;

	__ASSERT(domain != NULL, "");
	__ASSERT(num_parts == 0U || parts != NULL, "");
	__ASSERT(num_parts <= max_partitions, "");

	sync_key = k_spin_lock(&lock);

	/* Begin with a completely empty partition table */
	domain->num_partitions = 0U;
	(void)memset(domain->partitions, 0, sizeof(domain->partitions));

	/* Validate and copy in each caller-supplied partition; the loop
	 * body simply never runs when num_parts is zero.
	 */
	for (idx = 0U; idx < num_parts; idx++) {
		__ASSERT(parts[idx] != NULL, "");
		/* Reject zero-size and address-wrapping partitions */
		__ASSERT((parts[idx]->start + parts[idx]->size) >
			 parts[idx]->start,
			 "invalid partition %p size %d",
			 parts[idx], parts[idx]->size);
#if defined(CONFIG_EXECUTE_XOR_WRITE) || \
	defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
		__ASSERT(sane_partition_domain(domain, parts[idx]), "");
#endif
		domain->partitions[idx] = *parts[idx];
		domain->num_partitions++;
	}

	/* No threads are members of this domain yet */
	sys_dlist_init(&domain->mem_domain_q);

	k_spin_unlock(&lock, sync_key);
}
|
|
|
|
|
|
|
|
void k_mem_domain_destroy(struct k_mem_domain *domain)
|
|
|
|
{
|
2018-07-24 20:35:55 +02:00
|
|
|
k_spinlock_key_t key;
|
2017-07-07 14:29:30 +02:00
|
|
|
sys_dnode_t *node, *next_node;
|
|
|
|
|
2018-02-14 03:02:06 +01:00
|
|
|
__ASSERT(domain != NULL, "");
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
key = k_spin_lock(&lock);
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-10-29 12:55:41 +01:00
|
|
|
/* Handle architecture-specific destroy
|
|
|
|
* only if it is the current thread.
|
|
|
|
*/
|
2017-10-09 08:07:31 +02:00
|
|
|
if (_current->mem_domain_info.mem_domain == domain) {
|
2019-03-08 22:19:05 +01:00
|
|
|
z_arch_mem_domain_destroy(domain);
|
2017-10-09 08:07:31 +02:00
|
|
|
}
|
|
|
|
|
2017-07-07 14:29:30 +02:00
|
|
|
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
|
|
|
|
struct k_thread *thread =
|
|
|
|
CONTAINER_OF(node, struct k_thread, mem_domain_info);
|
|
|
|
|
|
|
|
sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);
|
|
|
|
thread->mem_domain_info.mem_domain = NULL;
|
|
|
|
}
|
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
k_spin_unlock(&lock, key);
|
2017-07-07 14:29:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void k_mem_domain_add_partition(struct k_mem_domain *domain,
|
2018-02-28 23:11:41 +01:00
|
|
|
struct k_mem_partition *part)
|
2017-07-07 14:29:30 +02:00
|
|
|
{
|
|
|
|
int p_idx;
|
2018-07-24 20:35:55 +02:00
|
|
|
k_spinlock_key_t key;
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-02-14 02:57:48 +01:00
|
|
|
__ASSERT(domain != NULL, "");
|
|
|
|
__ASSERT(part != NULL, "");
|
2019-03-02 03:12:49 +01:00
|
|
|
__ASSERT((part->start + part->size) > part->start,
|
|
|
|
"invalid partition %p size %d", part, part->size);
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-11-13 16:28:55 +01:00
|
|
|
#if defined(CONFIG_EXECUTE_XOR_WRITE) || \
|
|
|
|
defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS)
|
2018-02-28 23:11:41 +01:00
|
|
|
__ASSERT(sane_partition_domain(domain, part), "");
|
|
|
|
#endif
|
2017-10-18 02:01:48 +02:00
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
key = k_spin_lock(&lock);
|
2017-07-07 14:29:30 +02:00
|
|
|
|
|
|
|
for (p_idx = 0; p_idx < max_partitions; p_idx++) {
|
|
|
|
/* A zero-sized partition denotes it's a free partition */
|
2019-03-27 02:57:45 +01:00
|
|
|
if (domain->partitions[p_idx].size == 0U) {
|
2017-07-07 14:29:30 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Assert if there is no free partition */
|
|
|
|
__ASSERT(p_idx < max_partitions, "");
|
|
|
|
|
|
|
|
domain->partitions[p_idx].start = part->start;
|
|
|
|
domain->partitions[p_idx].size = part->size;
|
|
|
|
domain->partitions[p_idx].attr = part->attr;
|
|
|
|
|
|
|
|
domain->num_partitions++;
|
|
|
|
|
2019-05-02 10:00:30 +02:00
|
|
|
/* Handle architecture-specific add
|
2019-03-02 00:58:14 +01:00
|
|
|
* only if it is the current thread.
|
|
|
|
*/
|
|
|
|
if (_current->mem_domain_info.mem_domain == domain) {
|
2019-05-02 10:00:30 +02:00
|
|
|
z_arch_mem_domain_partition_add(domain, p_idx);
|
2019-03-02 00:58:14 +01:00
|
|
|
}
|
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
k_spin_unlock(&lock, key);
|
2017-07-07 14:29:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void k_mem_domain_remove_partition(struct k_mem_domain *domain,
|
|
|
|
struct k_mem_partition *part)
|
|
|
|
{
|
|
|
|
int p_idx;
|
2018-07-24 20:35:55 +02:00
|
|
|
k_spinlock_key_t key;
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-02-14 03:02:06 +01:00
|
|
|
__ASSERT(domain != NULL, "");
|
|
|
|
__ASSERT(part != NULL, "");
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
key = k_spin_lock(&lock);
|
2017-07-07 14:29:30 +02:00
|
|
|
|
|
|
|
/* find a partition that matches the given start and size */
|
|
|
|
for (p_idx = 0; p_idx < max_partitions; p_idx++) {
|
|
|
|
if (domain->partitions[p_idx].start == part->start &&
|
|
|
|
domain->partitions[p_idx].size == part->size) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Assert if not found */
|
2019-03-02 03:12:49 +01:00
|
|
|
__ASSERT(p_idx < max_partitions, "no matching partition found");
|
2017-07-07 14:29:30 +02:00
|
|
|
|
2018-10-29 12:55:41 +01:00
|
|
|
/* Handle architecture-specific remove
|
|
|
|
* only if it is the current thread.
|
|
|
|
*/
|
2017-10-09 08:07:31 +02:00
|
|
|
if (_current->mem_domain_info.mem_domain == domain) {
|
2019-03-08 22:19:05 +01:00
|
|
|
z_arch_mem_domain_partition_remove(domain, p_idx);
|
2017-10-09 08:07:31 +02:00
|
|
|
}
|
|
|
|
|
2018-12-03 14:21:54 +01:00
|
|
|
/* A zero-sized partition denotes it's a free partition */
|
2019-03-27 02:57:45 +01:00
|
|
|
domain->partitions[p_idx].size = 0U;
|
2017-07-07 14:29:30 +02:00
|
|
|
|
|
|
|
domain->num_partitions--;
|
|
|
|
|
2018-07-24 20:35:55 +02:00
|
|
|
k_spin_unlock(&lock, key);
|
2017-07-07 14:29:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Add a thread to a memory domain.
 *
 * Membership is recorded on both sides: the thread is appended to the
 * domain's member list and the thread's back-pointer is set. If the
 * thread being added is the one currently executing, the hardware
 * configuration is applied immediately.
 *
 * @param domain Domain to join (must be non-NULL)
 * @param thread Thread to add; must not already belong to a domain
 */
void k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
	k_spinlock_key_t sync_key;

	__ASSERT(domain != NULL, "");
	__ASSERT(thread != NULL, "");
	__ASSERT(thread->mem_domain_info.mem_domain == NULL,
		 "mem domain unset");

	sync_key = k_spin_lock(&lock);

	/* Record membership on both the domain and the thread */
	sys_dlist_append(&domain->mem_domain_q,
			 &thread->mem_domain_info.mem_domain_q_node);
	thread->mem_domain_info.mem_domain = domain;

	/* Apply the configuration now if we just added ourselves */
	if (_current == thread) {
		z_arch_mem_domain_configure(thread);
	}

	k_spin_unlock(&lock, sync_key);
}
|
|
|
|
|
|
|
|
/**
 * Remove a thread from its current memory domain.
 *
 * If the thread being removed is the one currently executing, the
 * architecture-level configuration of its domain is torn down first.
 * The thread is then unlinked and its domain pointer cleared.
 *
 * @param thread Thread to remove; must currently belong to a domain
 */
void k_mem_domain_remove_thread(k_tid_t thread)
{
	k_spinlock_key_t sync_key;

	__ASSERT(thread != NULL, "");
	__ASSERT(thread->mem_domain_info.mem_domain != NULL, "mem domain set");

	sync_key = k_spin_lock(&lock);

	/* Undo hardware configuration if we are removing ourselves */
	if (_current == thread) {
		z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
	}

	sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);
	thread->mem_domain_info.mem_domain = NULL;

	k_spin_unlock(&lock, sync_key);
}
|
|
|
|
|
|
|
|
/**
 * PRE_KERNEL_1 init hook: query the architecture layer for the maximum
 * number of partitions per domain and cache it in max_partitions.
 *
 * @param arg Unused device argument required by SYS_INIT
 * @return 0 always
 */
static int init_mem_domain_module(struct device *arg)
{
	ARG_UNUSED(arg);

	max_partitions = z_arch_mem_domain_max_partitions_get();
	/*
	 * max_partitions must be less than or equal to
	 * CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index
	 * out of bounds error.
	 */
	__ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");

	return 0;
}

SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
|