xtensa: move to use system cache API support for coherency

Remove the custom implementation and use the system cache interface instead.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif committed 2024-01-24 21:20:26 -05:00
parent 9183ceaf91
commit d7678f1694
19 changed files with 218 additions and 218 deletions
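The rename is mechanical: each Xtensa-specific pointer-aliasing helper (and the z_soc_* wrapper over it) maps onto one generic <zephyr/cache.h> call. A quick orientation sketch of the mapping, with a hypothetical demo function that is not part of the diff:

    #include <zephyr/cache.h>

    /* arch_xtensa_is_ptr_cached(p)                        -> sys_cache_is_ptr_cached(p)
     * arch_xtensa_is_ptr_uncached(p)                      -> sys_cache_is_ptr_uncached(p)
     * arch_xtensa_cached_ptr(p) / z_soc_cached_ptr(p)     -> sys_cache_cached_ptr_get(p)
     * arch_xtensa_uncached_ptr(p) / z_soc_uncached_ptr(p) -> sys_cache_uncached_ptr_get(p)
     */
    static void demo(void *obj)
    {
        /* Cached alias: reads and writes go through the L1 dcache. */
        void *c = (__sparse_force void *)sys_cache_cached_ptr_get(obj);

        /* Uncached alias: accesses bypass the L1 dcache entirely. */
        void *u = sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)obj);

        ARG_UNUSED(c);
        ARG_UNUSED(u);
    }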


@@ -8,11 +8,12 @@
 #include <zephyr/arch/xtensa/arch.h>
 #include <zephyr/arch/xtensa/cache.h>
 #include <zephyr/kernel/mm.h>
+#include <zephyr/cache.h>
 
 __weak bool sys_mm_is_phys_addr_in_range(uintptr_t phys)
 {
     bool valid;
-    uintptr_t cached = (uintptr_t)arch_xtensa_cached_ptr((void *)phys);
+    uintptr_t cached = (uintptr_t)sys_cache_cached_ptr_get((void *)phys);
 
     valid = ((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
              (phys < (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL))));
@@ -28,7 +29,7 @@ __weak bool sys_mm_is_virt_addr_in_range(void *virt)
     bool valid;
     uintptr_t addr = (uintptr_t)virt;
-    uintptr_t cached = (uintptr_t)arch_xtensa_cached_ptr(virt);
+    uintptr_t cached = (uintptr_t)sys_cache_cached_ptr_get(virt);
 
     valid = ((addr >= CONFIG_KERNEL_VM_BASE) &&
              (addr < (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE)));


@@ -255,13 +255,13 @@ static void map_memory(const uint32_t start, const uint32_t end,
     map_memory_range(start, end, attrs, shared);
 
 #ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
-    if (arch_xtensa_is_ptr_uncached((void *)start)) {
-        map_memory_range(POINTER_TO_UINT(z_soc_cached_ptr((void *)start)),
-            POINTER_TO_UINT(z_soc_cached_ptr((void *)end)),
+    if (sys_cache_is_ptr_uncached((void *)start)) {
+        map_memory_range(POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)start)),
+            POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)end)),
             attrs | XTENSA_MMU_CACHED_WB, shared);
-    } else if (arch_xtensa_is_ptr_cached((void *)start)) {
-        map_memory_range(POINTER_TO_UINT(z_soc_uncached_ptr((void *)start)),
-            POINTER_TO_UINT(z_soc_uncached_ptr((void *)end)), attrs, shared);
+    } else if (sys_cache_is_ptr_cached((void *)start)) {
+        map_memory_range(POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)start)),
+            POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)end)), attrs, shared);
     }
 #endif
 }
@@ -413,19 +413,19 @@ static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags,
     uint32_t flags, flags_uc;
 
     if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
-        if (arch_xtensa_is_ptr_cached(va)) {
+        if (sys_cache_is_ptr_cached(va)) {
             vaddr = va;
-            vaddr_uc = arch_xtensa_uncached_ptr(va);
+            vaddr_uc = sys_cache_uncached_ptr_get(va);
         } else {
-            vaddr = arch_xtensa_cached_ptr(va);
+            vaddr = sys_cache_cached_ptr_get(va);
             vaddr_uc = va;
         }
 
-        if (arch_xtensa_is_ptr_cached((void *)pa)) {
+        if (sys_cache_is_ptr_cached((void *)pa)) {
             paddr = pa;
-            paddr_uc = (uintptr_t)arch_xtensa_uncached_ptr((void *)pa);
+            paddr_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)pa);
         } else {
-            paddr = (uintptr_t)arch_xtensa_cached_ptr((void *)pa);
+            paddr = (uintptr_t)sys_cache_cached_ptr_get((void *)pa);
             paddr_uc = pa;
         }
@@ -588,11 +588,11 @@ static inline void __arch_mem_unmap(void *va)
     void *vaddr, *vaddr_uc;
 
     if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
-        if (arch_xtensa_is_ptr_cached(va)) {
+        if (sys_cache_is_ptr_cached(va)) {
             vaddr = va;
-            vaddr_uc = arch_xtensa_uncached_ptr(va);
+            vaddr_uc = sys_cache_uncached_ptr_get(va);
         } else {
-            vaddr = arch_xtensa_cached_ptr(va);
+            vaddr = sys_cache_cached_ptr_get(va);
             vaddr_uc = va;
         }
     } else {
@@ -866,11 +866,11 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
         uintptr_t va, va_uc;
         uint32_t new_flags, new_flags_uc;
 
-        if (arch_xtensa_is_ptr_cached((void *)start)) {
+        if (sys_cache_is_ptr_cached((void *)start)) {
             va = start;
-            va_uc = (uintptr_t)arch_xtensa_uncached_ptr((void *)start);
+            va_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)start);
         } else {
-            va = (uintptr_t)arch_xtensa_cached_ptr((void *)start);
+            va = (uintptr_t)sys_cache_cached_ptr_get((void *)start);
             va_uc = start;
         }


@@ -8,10 +8,10 @@
 #include <zephyr/kernel.h>
 #include <zephyr/sys/winstream.h>
 #include <zephyr/devicetree.h>
+#include <zephyr/cache.h>
 
 #include <adsp_memory.h>
 #include <mem_window.h>
-#include <soc.h>
 
 struct k_spinlock trace_lock;
@@ -78,7 +78,7 @@ static int winstream_console_init(void)
     }
     const struct mem_win_config *config = dev->config;
     void *buf =
-        arch_xtensa_uncached_ptr((__sparse_force void __sparse_cache *)config->mem_base);
+        sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)config->mem_base);
 
     winstream = sys_winstream_init(buf, config->size);
     winstream_console_hook_install();


@@ -7,6 +7,7 @@
 #include <adsp_shim.h>
 #include <intel_adsp_ipc.h>
 #include <mem_window.h>
+#include <zephyr/cache.h>
 
 /* Matches SOF_IPC_MSG_MAX_SIZE, though in practice nothing anywhere
  * near that big is ever sent. Should maybe consider making this a
@@ -49,8 +50,9 @@ static int send(const struct device *dev, int wait, uint32_t id,
         return -ENODEV;
     }
     const struct mem_win_config *mw0_config = mw0->config;
-    uint32_t *buf = (uint32_t *)arch_xtensa_uncached_ptr((void *)((uint32_t)mw0_config->mem_base
-                                 + CONFIG_IPM_CAVS_HOST_OUTBOX_OFFSET));
+    uint32_t *buf = (uint32_t *)sys_cache_uncached_ptr_get(
+            (void *)((uint32_t)mw0_config->mem_base
+                 + CONFIG_IPM_CAVS_HOST_OUTBOX_OFFSET));
 
     if (!intel_adsp_ipc_is_complete(INTEL_ADSP_IPC_HOST_DEV)) {
         return -EBUSY;
@@ -108,7 +110,7 @@ static bool ipc_handler(const struct device *dev, void *arg,
         return -ENODEV;
     }
     const struct mem_win_config *mw1_config = mw1->config;
-    uint32_t *msg = arch_xtensa_uncached_ptr((void *)mw1_config->mem_base);
+    uint32_t *msg = sys_cache_uncached_ptr_get((void *)mw1_config->mem_base);
 
     /* We play tricks to leave one word available before the
      * beginning of the SRAM window, this way the host can see the


@@ -16,7 +16,7 @@
  * Note that all passed in addresses should be in cached range
  * (aka cached addresses). Due to the need to calculate TLB
  * indexes, virtual addresses will be converted internally to
- * cached one via z_soc_cached_ptr(). However, physical addresses
+ * cached one via sys_cache_cached_ptr_get(). However, physical addresses
  * are untouched.
  */
@@ -183,8 +183,8 @@ int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)
      * the cached physical address is needed to perform
      * bound check.
      */
-    uintptr_t pa = POINTER_TO_UINT(z_soc_cached_ptr(UINT_TO_POINTER(phys)));
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(UINT_TO_POINTER(phys)));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     ARG_UNUSED(flags);
@@ -215,7 +215,7 @@ int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)
             "unable to assign free phys page %d\n", ret);
             goto out;
         }
-        pa = POINTER_TO_UINT(z_soc_cached_ptr(phys_block_ptr));
+        pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(phys_block_ptr));
     }
 
     /* Check bounds of physical address space */
@@ -296,7 +296,7 @@ int sys_mm_drv_map_region(void *virt, uintptr_t phys,
         goto out;
     }
 
-    va = (__sparse_force uint8_t *)z_soc_cached_ptr(virt);
+    va = (__sparse_force uint8_t *)sys_cache_cached_ptr_get(virt);
     pa = phys;
 
     key = k_spin_lock(&sys_mm_drv_common_lock);
@@ -324,7 +324,7 @@ out:
 int sys_mm_drv_map_array(void *virt, uintptr_t *phys,
              size_t cnt, uint32_t flags)
 {
-    void *va = (__sparse_force void *)z_soc_cached_ptr(virt);
+    void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
 
     return sys_mm_drv_simple_map_array(va, phys, cnt, flags);
 }
@@ -339,7 +339,7 @@ int sys_mm_drv_unmap_page(void *virt)
     int ret = 0;
 
     /* Use cached virtual address */
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     /* Check bounds of virtual address space */
     CHECKIF((va < UNUSED_L2_START_ALIGNED) ||
@@ -396,7 +396,7 @@ out:
 int sys_mm_drv_unmap_region(void *virt, size_t size)
 {
-    void *va = (__sparse_force void *)z_soc_cached_ptr(virt);
+    void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
 
     return sys_mm_drv_simple_unmap_region(va, size);
 }
@@ -408,7 +408,7 @@ int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys)
     int ret = 0;
 
     /* Use cached address */
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
         ret = -EINVAL;
@@ -449,7 +449,7 @@ int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags)
     uint16_t ent;
 
     /* Use cached address */
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
         ret = -EINVAL;
@@ -487,8 +487,8 @@ out:
 int sys_mm_drv_remap_region(void *virt_old, size_t size,
                 void *virt_new)
 {
-    void *va_new = (__sparse_force void *)z_soc_cached_ptr(virt_new);
-    void *va_old = (__sparse_force void *)z_soc_cached_ptr(virt_old);
+    void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
+    void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);
 
     return sys_mm_drv_simple_remap_region(va_old, size, va_new);
 }
@@ -500,8 +500,8 @@ int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
     size_t offset;
     int ret = 0;
 
-    virt_new = (__sparse_force void *)z_soc_cached_ptr(virt_new);
-    virt_old = (__sparse_force void *)z_soc_cached_ptr(virt_old);
+    virt_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
+    virt_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);
 
     CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
         !sys_mm_drv_is_virt_addr_aligned(virt_new) ||
@@ -598,8 +598,8 @@ int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
 {
     int ret;
 
-    void *va_new = (__sparse_force void *)z_soc_cached_ptr(virt_new);
-    void *va_old = (__sparse_force void *)z_soc_cached_ptr(virt_old);
+    void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
+    void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);
 
     ret = sys_mm_drv_simple_move_array(va_old, size, va_new,
                        phys_new, phys_cnt);
@@ -783,7 +783,7 @@ __imr void adsp_mm_restore_context(void *storage_buffer)
     while (phys_addr != 0) {
         uint32_t phys_addr_uncached =
-            POINTER_TO_UINT(z_soc_uncached_ptr(
+            POINTER_TO_UINT(sys_cache_uncached_ptr_get(
                 (void __sparse_cache *)UINT_TO_POINTER(phys_addr)));
         uint32_t phys_offset = phys_addr - L2_SRAM_BASE;
         uint32_t bank_idx = (phys_offset / SRAM_BANK_SIZE);
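Every entry point above normalizes its argument through sys_cache_cached_ptr_get() before doing anything else, because the TLB index math must key off one canonical alias. A hypothetical sketch of that step (the index formula is illustrative, not the driver's actual internals):

    /* Both aliases of the same page must select the same TLB entry,
     * so index from the canonical cached address.
     */
    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
    size_t tlb_index = (va - CONFIG_KERNEL_VM_BASE) / CONFIG_MM_DRV_PAGE_SIZE;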


@@ -16,7 +16,7 @@
  * Note that all passed in addresses should be in cached range
  * (aka cached addresses). Due to the need to calculate TLB
  * indexes, virtual addresses will be converted internally to
- * cached one via z_soc_cached_ptr(). However, physical addresses
+ * cached one via sys_cache_cached_ptr_get(). However, physical addresses
  * are untouched.
  */
@@ -32,7 +32,6 @@
 #include <zephyr/debug/sparse.h>
 #include <zephyr/cache.h>
-#include <soc.h>
 
 #include <adsp_memory.h>
 #include <zephyr/drivers/mm/system_mm.h>
@@ -80,8 +79,8 @@ int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)
      * the cached physical address is needed to perform
      * bound check.
      */
-    uintptr_t pa = POINTER_TO_UINT(z_soc_cached_ptr(UINT_TO_POINTER(phys)));
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(UINT_TO_POINTER(phys)));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     ARG_UNUSED(flags);
@@ -145,7 +144,7 @@ out:
 int sys_mm_drv_map_region(void *virt, uintptr_t phys,
               size_t size, uint32_t flags)
 {
-    void *va = (__sparse_force void *)z_soc_cached_ptr(virt);
+    void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
 
     return sys_mm_drv_simple_map_region(va, phys, size, flags);
 }
@@ -153,7 +152,7 @@ int sys_mm_drv_map_region(void *virt, uintptr_t phys,
 int sys_mm_drv_map_array(void *virt, uintptr_t *phys,
              size_t cnt, uint32_t flags)
 {
-    void *va = (__sparse_force void *)z_soc_cached_ptr(virt);
+    void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
 
     return sys_mm_drv_simple_map_array(va, phys, cnt, flags);
 }
@@ -166,7 +165,7 @@ int sys_mm_drv_unmap_page(void *virt)
     int ret = 0;
 
     /* Use cached virtual address */
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     /* Check bounds of virtual address space */
     CHECKIF((va < CONFIG_KERNEL_VM_BASE) ||
@@ -202,7 +201,7 @@ out:
 int sys_mm_drv_unmap_region(void *virt, size_t size)
 {
-    void *va = (__sparse_force void *)z_soc_cached_ptr(virt);
+    void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
 
     return sys_mm_drv_simple_unmap_region(va, size);
 }
@@ -214,7 +213,7 @@ int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys)
     int ret = 0;
 
     /* Use cached address */
-    uintptr_t va = POINTER_TO_UINT(z_soc_cached_ptr(virt));
+    uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));
 
     CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
         ret = -EINVAL;
@@ -274,7 +273,7 @@ int sys_mm_drv_update_page_flags(void *virt, uint32_t flags)
 int sys_mm_drv_update_region_flags(void *virt, size_t size,
                    uint32_t flags)
 {
-    void *va = (__sparse_force void *)z_soc_cached_ptr(virt);
+    void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);
 
     return sys_mm_drv_simple_update_region_flags(va, size, flags);
 }
@@ -283,8 +282,8 @@ int sys_mm_drv_update_region_flags(void *virt, size_t size,
 int sys_mm_drv_remap_region(void *virt_old, size_t size,
                 void *virt_new)
 {
-    void *va_new = (__sparse_force void *)z_soc_cached_ptr(virt_new);
-    void *va_old = (__sparse_force void *)z_soc_cached_ptr(virt_old);
+    void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
+    void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);
 
     return sys_mm_drv_simple_remap_region(va_old, size, va_new);
 }
@@ -294,8 +293,8 @@ int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
 {
     int ret;
 
-    void *va_new = (__sparse_force void *)z_soc_cached_ptr(virt_new);
-    void *va_old = (__sparse_force void *)z_soc_cached_ptr(virt_old);
+    void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
+    void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);
 
     ret = sys_mm_drv_simple_move_region(va_old, size, va_new, phys_new);
@@ -314,8 +313,8 @@ int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
 {
     int ret;
 
-    void *va_new = (__sparse_force void *)z_soc_cached_ptr(virt_new);
-    void *va_old = (__sparse_force void *)z_soc_cached_ptr(virt_old);
+    void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
+    void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);
 
     ret = sys_mm_drv_simple_move_array(va_old, size, va_new,
                        phys_new, phys_cnt);


@@ -160,118 +160,6 @@ static inline bool arch_mem_coherent(void *ptr)
 }
 #endif
 
-/**
- * @brief Test if a pointer is in cached region.
- *
- * Some hardware may map the same physical memory twice
- * so that it can be seen in both (incoherent) cached mappings
- * and a coherent "shared" area. This tests if a particular
- * pointer is within the cached, coherent area.
- *
- * @param ptr Pointer
- *
- * @retval True if pointer is in cached region.
- * @retval False if pointer is not in cached region.
- */
-static inline bool arch_xtensa_is_ptr_cached(void *ptr)
-{
-    size_t addr = (size_t) ptr;
-
-    return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
-}
-
-/**
- * @brief Test if a pointer is in un-cached region.
- *
- * Some hardware may map the same physical memory twice
- * so that it can be seen in both (incoherent) cached mappings
- * and a coherent "shared" area. This tests if a particular
- * pointer is within the un-cached, incoherent area.
- *
- * @param ptr Pointer
- *
- * @retval True if pointer is not in cached region.
- * @retval False if pointer is in cached region.
- */
-static inline bool arch_xtensa_is_ptr_uncached(void *ptr)
-{
-    size_t addr = (size_t) ptr;
-
-    return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
-}
-
-static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
-{
-    /* The math here is all compile-time: when the two regions
-     * differ by a power of two, we can convert between them by
-     * setting or clearing just one bit. Otherwise it needs two
-     * operations.
-     */
-    uint32_t rxor = (rto ^ rfrom) << 29;
-
-    rto <<= 29;
-    if (Z_IS_POW2(rxor)) {
-        if ((rxor & rto) == 0) {
-            return addr & ~rxor;
-        } else {
-            return addr | rxor;
-        }
-    } else {
-        return (addr & ~(7U << 29)) | rto;
-    }
-}
-
-/**
- * @brief Return cached pointer to a RAM address
- *
- * The Xtensa coherence architecture maps addressable RAM twice, in
- * two different 512MB regions whose L1 cache settings can be
- * controlled independently. So for any given pointer, it is possible
- * to convert it to and from a cached version.
- *
- * This function takes a pointer to any addressable object (either in
- * cacheable memory or not) and returns a pointer that can be used to
- * refer to the same memory through the L1 data cache. Data read
- * through the resulting pointer will reflect locally cached values on
- * the current CPU if they exist, and writes will go first into the
- * cache and be written back later.
- *
- * @see arch_xtensa_uncached_ptr()
- *
- * @param ptr A pointer to a valid C object
- * @return A pointer to the same object via the L1 dcache
- */
-static inline void __sparse_cache *arch_xtensa_cached_ptr(void *ptr)
-{
-    return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
-                                CONFIG_XTENSA_CACHED_REGION,
-                                CONFIG_XTENSA_UNCACHED_REGION);
-}
-
-/**
- * @brief Return uncached pointer to a RAM address
- *
- * The Xtensa coherence architecture maps addressable RAM twice, in
- * two different 512MB regions whose L1 cache settings can be
- * controlled independently. So for any given pointer, it is possible
- * to convert it to and from a cached version.
- *
- * This function takes a pointer to any addressable object (either in
- * cacheable memory or not) and returns a pointer that can be used to
- * refer to the same memory while bypassing the L1 data cache. Data
- * in the L1 cache will not be inspected nor modified by the access.
- *
- * @see arch_xtensa_cached_ptr()
- *
- * @param ptr A pointer to a valid C object
- * @return A pointer to the same object bypassing the L1 dcache
- */
-static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
-{
-    return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
-                   CONFIG_XTENSA_UNCACHED_REGION,
-                   CONFIG_XTENSA_CACHED_REGION);
-}
-
 /* Utility to generate an unrolled and optimal[1] code sequence to set
  * the RPO TLB registers (contra the HAL cacheattr macros, which
@@ -327,33 +215,6 @@ static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
     register uint32_t addr = 0, addrincr = 0x20000000; \
     FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
 } while (0)
-
-#else /* CONFIG_XTENSA_RPO_CACHE */
-
-static inline bool arch_xtensa_is_ptr_cached(void *ptr)
-{
-    ARG_UNUSED(ptr);
-
-    return false;
-}
-
-static inline bool arch_xtensa_is_ptr_uncached(void *ptr)
-{
-    ARG_UNUSED(ptr);
-
-    return false;
-}
-
-static inline void *arch_xtensa_cached_ptr(void *ptr)
-{
-    return ptr;
-}
-
-static inline void *arch_xtensa_uncached_ptr(void *ptr)
-{
-    return ptr;
-}
-
 #endif /* CONFIG_XTENSA_RPO_CACHE */
 
 #if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)


@@ -192,6 +192,144 @@ static ALWAYS_INLINE void arch_icache_disable(void)
 
 #endif /* CONFIG_ICACHE */
 
+#if defined(CONFIG_CACHE_DOUBLEMAP)
+/**
+ * @brief Test if a pointer is in cached region.
+ *
+ * Some hardware may map the same physical memory twice
+ * so that it can be seen in both (incoherent) cached mappings
+ * and a coherent "shared" area. This tests if a particular
+ * pointer is within the cached, coherent area.
+ *
+ * @param ptr Pointer
+ *
+ * @retval True if pointer is in cached region.
+ * @retval False if pointer is not in cached region.
+ */
+static inline bool arch_cache_is_ptr_cached(void *ptr)
+{
+    size_t addr = (size_t) ptr;
+
+    return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
+}
+
+/**
+ * @brief Test if a pointer is in un-cached region.
+ *
+ * Some hardware may map the same physical memory twice
+ * so that it can be seen in both (incoherent) cached mappings
+ * and a coherent "shared" area. This tests if a particular
+ * pointer is within the un-cached, incoherent area.
+ *
+ * @param ptr Pointer
+ *
+ * @retval True if pointer is not in cached region.
+ * @retval False if pointer is in cached region.
+ */
+static inline bool arch_cache_is_ptr_uncached(void *ptr)
+{
+    size_t addr = (size_t) ptr;
+
+    return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
+}
+
+static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
+{
+    /* The math here is all compile-time: when the two regions
+     * differ by a power of two, we can convert between them by
+     * setting or clearing just one bit. Otherwise it needs two
+     * operations.
+     */
+    uint32_t rxor = (rto ^ rfrom) << 29;
+
+    rto <<= 29;
+    if (Z_IS_POW2(rxor)) {
+        if ((rxor & rto) == 0) {
+            return addr & ~rxor;
+        } else {
+            return addr | rxor;
+        }
+    } else {
+        return (addr & ~(7U << 29)) | rto;
+    }
+}
+
+/**
+ * @brief Return cached pointer to a RAM address
+ *
+ * The Xtensa coherence architecture maps addressable RAM twice, in
+ * two different 512MB regions whose L1 cache settings can be
+ * controlled independently. So for any given pointer, it is possible
+ * to convert it to and from a cached version.
+ *
+ * This function takes a pointer to any addressable object (either in
+ * cacheable memory or not) and returns a pointer that can be used to
+ * refer to the same memory through the L1 data cache. Data read
+ * through the resulting pointer will reflect locally cached values on
+ * the current CPU if they exist, and writes will go first into the
+ * cache and be written back later.
+ *
+ * @see arch_uncached_ptr()
+ *
+ * @param ptr A pointer to a valid C object
+ * @return A pointer to the same object via the L1 dcache
+ */
+static inline void __sparse_cache *arch_cache_cached_ptr_get(void *ptr)
+{
+    return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
+                                CONFIG_XTENSA_CACHED_REGION,
+                                CONFIG_XTENSA_UNCACHED_REGION);
+}
+
+/**
+ * @brief Return uncached pointer to a RAM address
+ *
+ * The Xtensa coherence architecture maps addressable RAM twice, in
+ * two different 512MB regions whose L1 cache settings can be
+ * controlled independently. So for any given pointer, it is possible
+ * to convert it to and from a cached version.
+ *
+ * This function takes a pointer to any addressable object (either in
+ * cacheable memory or not) and returns a pointer that can be used to
+ * refer to the same memory while bypassing the L1 data cache. Data
+ * in the L1 cache will not be inspected nor modified by the access.
+ *
+ * @see arch_cached_ptr()
+ *
+ * @param ptr A pointer to a valid C object
+ * @return A pointer to the same object bypassing the L1 dcache
+ */
+static inline void *arch_cache_uncached_ptr_get(void __sparse_cache *ptr)
+{
+    return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
+                   CONFIG_XTENSA_UNCACHED_REGION,
+                   CONFIG_XTENSA_CACHED_REGION);
+}
+#else
+static inline bool arch_cache_is_ptr_cached(void *ptr)
+{
+    ARG_UNUSED(ptr);
+
+    return false;
+}
+
+static inline bool arch_cache_is_ptr_uncached(void *ptr)
+{
+    ARG_UNUSED(ptr);
+
+    return false;
+}
+
+static inline void *arch_cache_cached_ptr_get(void *ptr)
+{
+    return ptr;
+}
+
+static inline void *arch_cache_uncached_ptr_get(void *ptr)
+{
+    return ptr;
+}
+#endif
+
 #ifdef __cplusplus
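The z_xtrpoflip() arithmetic is easiest to see with concrete region numbers. A worked example with made-up CONFIG_XTENSA_CACHED_REGION / CONFIG_XTENSA_UNCACHED_REGION values (real values are SoC-specific):

    /* A 512MB region is addr >> 29, i.e. the top 3 address bits.
     *
     * Case 1: cached = 2, uncached = 6.
     *   rxor = (2 ^ 6) << 29 = 0x80000000, a power of two.
     *   cached -> uncached: 0x40001234 | 0x80000000 = 0xC0001234 (one OR)
     *   uncached -> cached: 0xC0001234 & ~0x80000000 = 0x40001234 (one AND)
     *
     * Case 2: cached = 5, uncached = 6.
     *   rxor = (5 ^ 6) << 29 = 0x60000000, not a power of two.
     *   Fall back to two operations: (addr & ~(7U << 29)) | (6U << 29).
     */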


@@ -7,6 +7,8 @@ config SOC_FAMILY_INTEL_ADSP
     select WINSTREAM
     select ARCH_SUPPORTS_COREDUMP
     select CPU_HAS_DCACHE
+    select ARCH_HAS_USERSPACE if XTENSA_MMU
+    select CPU_CACHE_INCOHERENT
     bool
 
 if SOC_FAMILY_INTEL_ADSP


@@ -94,7 +94,7 @@ void soc_mp_init(void)
      * Only when more than 1 CPUs is enabled, then this is in uncached area.
      * Otherwise, this is in cached area and will fail this test.
      */
-    __ASSERT(!arch_xtensa_is_ptr_cached(&g_key_read_holder),
+    __ASSERT(!sys_cache_is_ptr_cached(&g_key_read_holder),
          "g_key_read_holder must be uncached");
 #endif /* defined(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1) */
     g_key_read_holder = INTEL_ADSP_ACE15_MAGIC_KEY;


@@ -227,7 +227,7 @@ __imr void pm_state_imr_restore(void)
 {
     struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);
 
     /* restore lpsram power and contents */
-    bmemcpy(z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
+    bmemcpy(sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)
                    UINT_TO_POINTER(LP_SRAM_BASE)),
         imr_layout->imr_state.header.imr_ram_storage,
         LP_SRAM_SIZE);


@@ -5,9 +5,9 @@
 #include <cavs-idc.h>
 #include <adsp_memory.h>
 #include <adsp_shim.h>
-#include <soc.h>
 #include <zephyr/irq.h>
 #include <zephyr/pm/pm.h>
+#include <zephyr/cache.h>
 
 /* IDC power up message to the ROM firmware. This isn't documented
  * anywhere, it's basically just a magic number (except the high bit,
@@ -62,7 +62,8 @@ void soc_start_core(int cpu_num)
      * such that the standard system bootstrap out of IMR can
      * place it there. But this is fine for now.
      */
-    void **lpsram = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)LP_SRAM_BASE);
+    void **lpsram = sys_cache_uncached_ptr_get(
+            (__sparse_force void __sparse_cache *)LP_SRAM_BASE);
     uint8_t tramp[] = {
         0x06, 0x01, 0x00, /* J <PC+8> (jump to L32R) */
         0, /* (padding to align entry_addr) */


@@ -20,7 +20,6 @@
 #include <adsp_clk.h>
 #include <adsp_imr_layout.h>
 #include <cavs-idc.h>
-#include "soc.h"
 
 #ifdef CONFIG_DYNAMIC_INTERRUPTS
 #include <zephyr/sw_isr_table.h>
@@ -150,7 +149,7 @@ void pm_state_set(enum pm_state state, uint8_t substate_id)
             .imr_restore_vector = rom_entry,
         };
         struct imr_layout *imr_layout =
-            z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
+            sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)
                        L3_MEM_BASE_ADDR);
 
         imr_layout->imr_state.header = hdr;


@@ -7,7 +7,7 @@
 #include <zephyr/devicetree.h>
 #include <zephyr/init.h>
 #include <errno.h>
-#include <soc.h>
+#include <zephyr/cache.h>
 #include <mem_window.h>
@@ -22,7 +22,7 @@ int boot_complete(void)
     }
     config = dev->config;
 
-    win = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)config->mem_base);
+    win = sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)config->mem_base);
     /* Software protocol: "firmware entered" has the value 5 */
     win[0] = 5;


@@ -6,6 +6,7 @@
 #include <mem_window.h>
 #include <zephyr/debug/sparse.h>
+#include <zephyr/cache.h>
 
 /*
  * SRAM window for debug info (window 2) is organized in slots,
@@ -67,7 +68,7 @@ struct adsp_debug_window {
 #define WIN2_MBASE DT_REG_ADDR(DT_PHANDLE(DT_NODELABEL(mem_window2), memory))
 
 #define ADSP_DW ((volatile struct adsp_debug_window *) \
-         (z_soc_uncached_ptr((__sparse_force void __sparse_cache *) \
+         (sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *) \
           (WIN2_MBASE + WIN2_OFFSET))))
 
 #endif


@@ -159,7 +159,7 @@ static inline int intel_adsp_hda_set_buffer(uint32_t base,
      * region or not, we do need a consistent address space to check
      * against for our assertion. This is cheap.
      */
-    uint32_t addr = (uint32_t)arch_xtensa_cached_ptr(buf);
+    uint32_t addr = (uint32_t)sys_cache_cached_ptr_get(buf);
     uint32_t aligned_addr = addr & HDA_ALIGN_MASK;
     uint32_t aligned_size = buf_size & HDA_BUFFER_SIZE_MASK;
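The alignment assertion only works because both aliases of a buffer normalize to the same canonical address. A small illustrative check (variable names are hypothetical):

    void *buf_uc = sys_cache_uncached_ptr_get(
            (__sparse_force void __sparse_cache *)buf);

    /* Whichever alias a caller passes in, the cached view is identical. */
    __ASSERT(sys_cache_cached_ptr_get(buf) == sys_cache_cached_ptr_get(buf_uc),
             "aliases must normalize to the same address");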


@@ -26,10 +26,6 @@ extern void soc_start_core(int cpu_num);
 
 extern bool soc_cpus_active[CONFIG_MP_MAX_NUM_CPUS];
 
-/* Legacy cache APIs still used in a few places */
-#define z_soc_cached_ptr(p) arch_xtensa_cached_ptr(p)
-#define z_soc_uncached_ptr(p) arch_xtensa_uncached_ptr(p)
-
 /**
  * @brief Halts and offlines a running CPU
  *


@@ -13,7 +13,7 @@ ZTEST(adsp_cache, test_adsp_cache_flush_inv_all)
     uint32_t *cached, *uncached;
 
     cached = (uint32_t *)LP_SRAM_BASE;
-    uncached = arch_xtensa_uncached_ptr(cached);
+    uncached = sys_cache_uncached_ptr_get(cached);
 
     *cached = 42;
     *uncached = 40;


@@ -84,19 +84,19 @@ static void core_smoke(void *arg)
     zassert_equal(cpu, arch_curr_cpu()->id, "wrong cpu");
 
     /* Un/cached regions should be configured and distinct */
-    zassert_equal(&tag, arch_xtensa_cached_ptr((void *)&tag),
+    zassert_equal(&tag, sys_cache_cached_ptr_get((void *)&tag),
               "stack memory not cached");
-    zassert_not_equal(&tag, arch_xtensa_uncached_ptr((void *)&tag),
+    zassert_not_equal(&tag, sys_cache_uncached_ptr_get((void *)&tag),
               "stack memory not cached");
-    zassert_not_equal(&static_tag, arch_xtensa_cached_ptr((void *)&static_tag),
+    zassert_not_equal(&static_tag, sys_cache_cached_ptr_get((void *)&static_tag),
               "stack memory not cached");
-    zassert_equal(&static_tag, arch_xtensa_uncached_ptr((void *)&static_tag),
+    zassert_equal(&static_tag, sys_cache_uncached_ptr_get((void *)&static_tag),
               "stack memory not cached");
 
     /* Un/cached regions should be working */
     printk(" Cache behavior check\n");
-    volatile int *ctag = (volatile int *)arch_xtensa_cached_ptr((void *)&tag);
-    volatile int *utag = (volatile int *)arch_xtensa_uncached_ptr((void *)&tag);
+    volatile int *ctag = (volatile int *)sys_cache_cached_ptr_get((void *)&tag);
+    volatile int *utag = (volatile int *)sys_cache_uncached_ptr_get((void *)&tag);
 
     tag = 99;
     zassert_true(*ctag == 99, "variable is cached");