xtensa: polish doxygen and add missing doc

This polishes the doxygen comments to, hopefully, make them look
better in the generated API doc. It also adds missing documentation
to various functions and macros.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Daniel Leung 2023-10-09 12:31:56 -07:00 committed by Anas Nashif
parent 035c8d8ceb
commit 0ee1e28a2f
6 changed files with 196 additions and 45 deletions


@@ -7,7 +7,7 @@
* @file
* @brief Xtensa specific kernel interface header
* This header contains the Xtensa specific kernel interface. It is included
* by the generic kernel interface header (include/arch/cpu.h)
* by the generic kernel interface header (include/zephyr/arch/cpu.h)
*/
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
@@ -33,6 +33,8 @@
#include <zephyr/arch/xtensa/thread_stack.h>
#include <zephyr/sys/slist.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/arch/xtensa/xtensa_mmu.h>
/**
@@ -61,7 +63,23 @@ struct arch_mem_domain {
sys_snode_t node;
};
/**
* @brief Generate hardware exception.
*
* This generates a hardware exception which is used by ARCH_EXCEPT().
*
* @param reason_p Reason for exception.
*/
extern void xtensa_arch_except(int reason_p);
/**
* @brief Generate kernel oops.
*
* This generates a kernel oops which is used by arch_syscall_oops().
*
* @param reason_p Reason for exception.
* @param ssf Stack pointer.
*/
extern void xtensa_arch_kernel_oops(int reason_p, void *ssf);
#ifdef CONFIG_USERSPACE
@@ -79,9 +97,9 @@ extern void xtensa_arch_kernel_oops(int reason_p, void *ssf);
#else
#define ARCH_EXCEPT(reason_p) do { \
xtensa_arch_except(reason_p); \
CODE_UNREACHABLE; \
} while (false)
xtensa_arch_except(reason_p); \
CODE_UNREACHABLE; \
} while (false)
#endif
@@ -93,44 +111,47 @@ __syscall void xtensa_user_fault(unsigned int reason);
extern void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
}
extern uint32_t sys_clock_cycle_get_32(void);
{ \
Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
}
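ARCH_IRQ_CONNECT is normally reached through the generic IRQ_CONNECT() macro rather than called directly. A minimal sketch of a driver hooking an interrupt this way; the IRQ number, priority, and handler names are hypothetical:

#include <zephyr/irq.h>

#define MY_DEV_IRQ  5   /* hypothetical interrupt line */
#define MY_DEV_PRIO 2   /* hypothetical priority */

static void my_dev_isr(const void *arg)
{
        /* acknowledge and service the device here */
}

static void my_dev_init(void)
{
        /* Registers my_dev_isr at build time, then unmasks the line. */
        IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr, NULL, 0);
        irq_enable(MY_DEV_IRQ);
}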
/** Implementation of @ref arch_k_cycle_get_32. */
static inline uint32_t arch_k_cycle_get_32(void)
{
return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);
/** Implementation of @ref arch_k_cycle_get_64. */
static inline uint64_t arch_k_cycle_get_64(void)
{
return sys_clock_cycle_get_64();
}
/** Implementation of @ref arch_nop. */
static ALWAYS_INLINE void arch_nop(void)
{
__asm__ volatile("nop");
}
/**
* @brief Lock VECBASE if supported by hardware.
*
* Bit 0 of VECBASE acts as a lock bit on hardware supporting
* this feature. When this bit is set, VECBASE cannot be changed
* until it is cleared by hardware reset. When the hardware does not
* support this bit, it is hardwired to 0.
*/
static ALWAYS_INLINE void xtensa_vecbase_lock(void)
{
int vecbase;
__asm__ volatile("rsr.vecbase %0" : "=r" (vecbase));
/* On some targets, bit 0 of VECBASE works as a lock bit.
* When this bit is set, VECBASE can't be changed until it is
* cleared by reset. When the target does not have it, it is
* hardwired to 0.
*/
__asm__ volatile("wsr.vecbase %0; rsync" : : "r" (vecbase | 1));
}
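A usage sketch, assuming an early platform init hook (the hook name is hypothetical) that runs once the vector table is final:

static void soc_early_init(void)   /* hypothetical init hook */
{
        /* Vectors are in place; lock VECBASE against further
         * writes until the next hardware reset.
         */
        xtensa_vecbase_lock();
}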
#if defined(CONFIG_XTENSA_RPO_CACHE)
#if defined(CONFIG_ARCH_HAS_COHERENCE)
#if defined(CONFIG_XTENSA_RPO_CACHE) || defined(__DOXYGEN__)
#if defined(CONFIG_ARCH_HAS_COHERENCE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_mem_coherent. */
static inline bool arch_mem_coherent(void *ptr)
{
size_t addr = (size_t) ptr;
@@ -139,6 +160,19 @@ static inline bool arch_mem_coherent(void *ptr)
}
#endif
/**
* @brief Test if a pointer is in cached region.
*
* Some hardware may map the same physical memory twice
* so that it can be seen in both (incoherent) cached mappings
* and a coherent "shared" area. This tests if a particular
* pointer is within the cached, incoherent area.
*
* @param ptr Pointer to test.
*
* @retval True if pointer is in cached region.
* @retval False if pointer is not in cached region.
*/
static inline bool arch_xtensa_is_ptr_cached(void *ptr)
{
size_t addr = (size_t) ptr;
@@ -146,6 +180,19 @@ static inline bool arch_xtensa_is_ptr_cached(void *ptr)
return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
}
/**
* @brief Test if a pointer is in un-cached region.
*
* Some hardware may map the same physical memory twice
* so that it can be seen in both (incoherent) cached mappings
* and a coherent "shared" area. This tests if a particular
* pointer is within the un-cached, coherent area.
*
* @param ptr Pointer to test.
*
* @retval True if pointer is not in cached region.
* @retval False if pointer is in cached region.
*/
static inline bool arch_xtensa_is_ptr_uncached(void *ptr)
{
size_t addr = (size_t) ptr;
@@ -173,6 +220,7 @@ static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t
return (addr & ~(7U << 29)) | rto;
}
}
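The arithmetic behind these checks: the region-protection hardware splits the 4 GB address space into eight 512 MB regions selected by address bits 31..29, so shifting an address right by 29 yields its region number. A standalone sketch, assuming the cached region is number 5:

#include <stdbool.h>
#include <stddef.h>

#define CACHED_REGION 5   /* assumed value of CONFIG_XTENSA_CACHED_REGION */

static bool in_cached_region(const void *ptr)
{
        /* Bits 31..29 select one of eight 512 MB regions. */
        return (((size_t)ptr) >> 29) == CACHED_REGION;
}

/* Example: 0xA0001234 >> 29 == 5, so that address is in the cached region. */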
/**
* @brief Return cached pointer to a RAM address
*
@@ -271,10 +319,14 @@ static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
addr += addrincr; \
} while (0)
#define ARCH_XTENSA_SET_RPO_TLB() do { \
register uint32_t addr = 0, addrincr = 0x20000000; \
FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
} while (0)
/**
* @brief Set up RPO TLB registers.
*/
#define ARCH_XTENSA_SET_RPO_TLB() \
do { \
register uint32_t addr = 0, addrincr = 0x20000000; \
FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
} while (0)
#else /* CONFIG_XTENSA_RPO_CACHE */
@@ -304,7 +356,17 @@ static inline void *arch_xtensa_uncached_ptr(void *ptr)
#endif /* CONFIG_XTENSA_RPO_CACHE */
#ifdef CONFIG_XTENSA_MMU
#if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)
/**
* @brief Perform additional steps after MMU initialization.
*
* This performs additional steps related to memory management
* after the main MMU initialization code. This needs to be defined
* in the SoC layer. The default is to do nothing.
*
* @param is_core0 True if this is called while executing on
* CPU core #0.
*/
extern void arch_xtensa_mmu_post_init(bool is_core0);
#endif
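A hedged sketch of what an SoC layer might provide; the body is illustrative only, as the required steps are SoC specific:

/* Hypothetical SoC-layer definition; the steps shown are examples only. */
void arch_xtensa_mmu_post_init(bool is_core0)
{
        if (is_core0) {
                /* one-time fixups done only on the boot core,
                 * e.g. mapping SoC-specific peripheral windows
                 */
        }

        /* per-core follow-up work, if any, goes here */
}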


@@ -13,26 +13,53 @@
#include <zephyr/kernel_structs.h>
#include <zsr.h>
/**
* @brief Read a special register.
*
* @param sr Name of special register.
*
* @return Value of special register.
*/
#define XTENSA_RSR(sr) \
({uint32_t v; \
__asm__ volatile ("rsr." sr " %0" : "=a"(v)); \
v; })
/**
* @brief Write to a special register.
*
* @param sr Name of special register.
* @param v Value to be written to special register.
*/
#define XTENSA_WSR(sr, v) \
do { \
__asm__ volatile ("wsr." sr " %0" : : "r"(v)); \
} while (false)
/**
* @brief Read a user register.
*
* @param ur Name of user register.
*
* @return Value of user register.
*/
#define XTENSA_RUR(ur) \
({uint32_t v; \
__asm__ volatile ("rur." ur " %0" : "=a"(v)); \
v; })
/**
* @brief Write to a user register.
*
* @param ur Name of user register.
* @param v Value to be written to user register.
*/
#define XTENSA_WUR(ur, v) \
do { \
__asm__ volatile ("wur." ur " %0" : : "r"(v)); \
} while (false)
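The register name is pasted directly into the instruction mnemonic, so it is passed as a string literal. A round-trip sketch using the PS special register:

static inline uint32_t ps_round_trip(void)
{
        uint32_t ps = XTENSA_RSR("ps");   /* emits rsr.ps */

        XTENSA_WSR("ps", ps);             /* emits wsr.ps; no rsync issued */
        return ps;
}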
/** Implementation of @ref arch_curr_cpu. */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
_cpu_t *cpu;
@@ -42,6 +69,7 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
return cpu;
}
/** Implementation of @ref arch_proc_id. */
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
uint32_t prid;
@@ -54,6 +82,7 @@ static ALWAYS_INLINE uint32_t arch_proc_id(void)
extern unsigned int soc_num_cpus;
#endif
/** Implementation of @ref arch_num_cpus. */
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
{
#ifdef CONFIG_SOC_HAS_RUNTIME_NUM_CPUS


@@ -1,11 +1,12 @@
/**
/*
* Copyright (c) 2021 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
#define ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
/* Included from <sys/atomic.h> */
/* Included from <zephyr/sys/atomic.h> */
/* Recent GCC versions actually do have working atomics support on
* Xtensa (and so should work with CONFIG_ATOMIC_OPERATIONS_BUILTIN),
@@ -13,6 +14,7 @@
* inline implementation here that is more or less identical
*/
/** Implementation of @ref atomic_get. */
static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
{
atomic_val_t ret;
@@ -28,6 +30,23 @@ static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
return ret;
}
/**
* @brief Xtensa specific atomic compare-and-set (CAS).
*
* This utilizes the SCOMPARE1 register and the s32c1i instruction
* to perform the compare-and-set atomic operation. It will
* unconditionally read from the atomic variable at @p addr
* before the comparison. This value is returned from
* the function.
*
* @param addr Address of atomic variable.
* @param oldval Original value to compare against.
* @param newval New value to store.
*
* @return The value at the memory location before CAS.
*
* @see atomic_cas.
*/
static ALWAYS_INLINE
atomic_val_t xtensa_cas(atomic_t *addr, atomic_val_t oldval,
atomic_val_t newval)
@@ -38,12 +57,14 @@ atomic_val_t xtensa_cas(atomic_t *addr, atomic_val_t oldval,
return newval; /* got swapped with the old memory by s32c1i */
}
/** Implementation of @ref atomic_cas. */
static ALWAYS_INLINE
bool atomic_cas(atomic_t *target, atomic_val_t oldval, atomic_val_t newval)
{
return oldval == xtensa_cas(target, oldval, newval);
}
/** Implementation of @ref atomic_ptr_cas. */
static ALWAYS_INLINE
bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
{
@@ -57,7 +78,6 @@ bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
* specified expression. Evaluates to the old value which was
* atomically replaced.
*/
#define Z__GEN_ATOMXCHG(expr) ({ \
atomic_val_t res, cur; \
do { \
@@ -66,75 +86,88 @@ bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
} while (res != cur); \
res; })
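Each operation below is this retry loop instantiated with a different expression. From portable code, the usual entry points are the generic atomic API; a brief sketch:

#include <zephyr/sys/atomic.h>

static atomic_t refcount = ATOMIC_INIT(0);

void get_ref(void)
{
        /* Compiles down to the s32c1i retry loop generated above. */
        atomic_inc(&refcount);
}

bool try_claim(atomic_t *flag)
{
        /* True only for the caller that atomically flips 0 -> 1. */
        return atomic_cas(flag, 0, 1);
}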
/** Implementation of @ref atomic_set. */
static ALWAYS_INLINE
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
return Z__GEN_ATOMXCHG(value);
}
/** Implementation of @ref atomic_add. */
static ALWAYS_INLINE
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur + value);
}
/** Implementation of @ref atomic_sub. */
static ALWAYS_INLINE
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur - value);
}
/** Implementation of @ref atomic_inc. */
static ALWAYS_INLINE
atomic_val_t atomic_inc(atomic_t *target)
{
return Z__GEN_ATOMXCHG(cur + 1);
}
/** Implementation of @ref atomic_dec. */
static ALWAYS_INLINE
atomic_val_t atomic_dec(atomic_t *target)
{
return Z__GEN_ATOMXCHG(cur - 1);
}
/** Implementation of @ref atomic_or. */
static ALWAYS_INLINE atomic_val_t atomic_or(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur | value);
}
/** Implementation of @ref atomic_xor. */
static ALWAYS_INLINE atomic_val_t atomic_xor(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur ^ value);
}
/** Implementation of @ref atomic_and. */
static ALWAYS_INLINE atomic_val_t atomic_and(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur & value);
}
/** Implementation of @ref atomic_nand. */
static ALWAYS_INLINE atomic_val_t atomic_nand(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(~(cur & value));
}
/** Implementation of @ref atomic_ptr_get. */
static ALWAYS_INLINE void *atomic_ptr_get(const atomic_ptr_t *target)
{
return (void *) atomic_get((atomic_t *)target);
}
/** Implementation of @ref atomic_ptr_set. */
static ALWAYS_INLINE void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
return (void *) atomic_set((atomic_t *) target, (atomic_val_t) value);
}
/** Implementation of @ref atomic_clear. */
static ALWAYS_INLINE atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
/** Implementation of @ref atomic_ptr_clear. */
static ALWAYS_INLINE void *atomic_ptr_clear(atomic_ptr_t *target)
{
return (void *) atomic_set((atomic_t *) target, 0);


@@ -22,7 +22,9 @@ BUILD_ASSERT(Z_IS_POW2(XCHAL_DCACHE_LINESIZE));
BUILD_ASSERT(Z_IS_POW2(Z_DCACHE_MAX));
#endif
#if defined(CONFIG_DCACHE)
#if defined(CONFIG_DCACHE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_dcache_flush_range. */
static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
@@ -38,6 +40,7 @@ static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t bytes)
return 0;
}
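A typical caller, sketched below: pushing CPU writes out to RAM before a device reads the buffer. The buffer and its use are hypothetical; portable code would normally go through sys_cache_data_flush_range() instead of calling the arch function directly.

static uint8_t dma_buf[256];   /* hypothetical device buffer */

static void start_dma_tx(void)
{
        /* ... fill dma_buf ... */

        /* Make sure the data is in RAM, not just in the dcache,
         * before the DMA engine starts reading it.
         */
        arch_dcache_flush_range(dma_buf, sizeof(dma_buf));
}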
/** Implementation of @ref arch_dcache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
@@ -53,6 +56,7 @@ static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t byt
return 0;
}
/** Implementation of @ref arch_dcache_invd_range. */
static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
@@ -68,6 +72,7 @@ static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t bytes)
return 0;
}
/** Implementation of @ref arch_dcache_invd_all. */
static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
@@ -81,6 +86,7 @@ static ALWAYS_INLINE int arch_dcache_invd_all(void)
return 0;
}
/** Implementation of @ref arch_dcache_flush_all. */
static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
#if XCHAL_DCACHE_SIZE
@@ -94,6 +100,7 @@ static ALWAYS_INLINE int arch_dcache_flush_all(void)
return 0;
}
/** Implementation of @ref arch_dcache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
@@ -107,11 +114,13 @@ static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
return 0;
}
/** Implementation of @ref arch_dcache_enable. */
static ALWAYS_INLINE void arch_dcache_enable(void)
{
/* nothing */
}
/** Implementation of @ref arch_dcache_disable. */
static ALWAYS_INLINE void arch_dcache_disable(void)
{
/* nothing */
@@ -119,18 +128,21 @@ static ALWAYS_INLINE void arch_dcache_disable(void)
#endif /* CONFIG_DCACHE */
#if defined(CONFIG_ICACHE)
#if defined(CONFIG_ICACHE) || defined(__DOXYGEN__)
/** Implementation of @ref arch_icache_line_size_get. */
static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_flush_all. */
static ALWAYS_INLINE int arch_icache_flush_all(void)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_invd_all. */
static ALWAYS_INLINE int arch_icache_invd_all(void)
{
#if XCHAL_ICACHE_SIZE
@@ -139,16 +151,19 @@ static ALWAYS_INLINE int arch_icache_invd_all(void)
return 0;
}
/** Implementation of @ref arch_icache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_flush_range. */
static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_invd_range. */
static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
#if XCHAL_ICACHE_SIZE
@@ -157,16 +172,19 @@ static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
return 0;
}
/** Implementation of @ref arch_icache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
return -ENOTSUP;
}
/** Implementation of @ref arch_icache_enable. */
static ALWAYS_INLINE void arch_icache_enable(void)
{
/* nothing */
}
/** Implementation of @ref arch_icache_disable. */
static ALWAYS_INLINE void arch_icache_disable(void)
{
/* nothing */


@@ -17,8 +17,8 @@
#define XTREG_GRP_SPECIAL 0x0200
#define XTREG_GRP_USER 0x0300
/*
* Register description fot GDB stub.
/**
* @brief Register description for GDB stub.
*
* Values are based on gdb/gdb/xtensa-config.c in the Xtensa overlay,
* where registers are defined using the XTREG() macro:
@@ -35,32 +35,35 @@
* gpkt_offset : ofs
*/
struct xtensa_register {
/* Register value */
/** Register value */
uint32_t val;
/* GDB register index (for p/P packets) */
/** GDB register index (for p/P packets) */
uint8_t idx;
/* Size of register */
/** Size of register */
uint8_t byte_size;
/* Xtensa register number */
/** Xtensa register number */
uint16_t regno;
/* Offset of this register in GDB G-packet.
/**
* Offset of this register in GDB G-packet.
* -1 if register is not in G-packet.
*/
int16_t gpkt_offset;
/* Offset of saved register in stack frame.
/**
* Offset of saved register in stack frame.
* 0 if not saved in stack frame.
*/
int8_t stack_offset;
/* Sequence number */
/** Sequence number */
uint8_t seqno;
/* Set 1 to if register should not be written
/**
* Set to 1 if register should not be written
* to during debugging.
*/
uint8_t is_read_only:1;
@@ -78,26 +81,29 @@ struct xtensa_register {
*/
#include <gdbstub/soc.h>
/**
* @brief Architecture specific GDB context.
*/
struct gdb_ctx {
/* Exception reason */
/** Exception reason */
unsigned int exception;
/* Register descriptions */
/** Register descriptions */
struct xtensa_register *regs;
/* Number of registers */
/** Number of registers */
uint8_t num_regs;
/* Sequence number */
/** Sequence number */
uint8_t seqno;
/* Index in register descriptions of A0 register */
/** Index in register descriptions of A0 register */
uint8_t a0_idx;
/* Index in register descriptions of AR0 register */
/** Index in register descriptions of AR0 register */
uint8_t ar_idx;
/* Index in register descriptions of WINDOWBASE register */
/** Index in register descriptions of WINDOWBASE register */
uint8_t wb_idx;
};


@@ -130,6 +130,7 @@ static ALWAYS_INLINE void xtensa_irq_disable(uint32_t irq)
z_xt_ints_off(1 << irq);
}
/** Implementation of @ref arch_irq_lock. */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
unsigned int key;
@@ -139,12 +140,14 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
return key;
}
/** Implementation of @ref arch_irq_unlock. */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
__asm__ volatile("wsr.ps %0; rsync"
:: "r"(key) : "memory");
}
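The canonical pairing, sketched below; portable code would use the generic irq_lock()/irq_unlock() wrappers that map onto these:

static void update_shared_state(void)
{
        unsigned int key = arch_irq_lock();

        /* ... touch data shared with ISRs ... */

        arch_irq_unlock(key);
}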
/** Implementation of @ref arch_irq_unlocked. */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
return (key & 0xf) == 0; /* INTLEVEL field */