intel_adsp: add a new series to support Meteorlake

Meteorlake support as part of the Intel ADSP family.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Co-authored-by: Michal Wasko <michal.wasko@intel.com>
Co-authored-by: Konrad Leszczynski <konrad.leszczynski@intel.com>
Co-authored-by: Rafal Redzimski <rafal.f.redzimski@intel.com>
Co-authored-by: Enjia Mai <enjia.mai@intel.com>
Co-authored-by: Flavio Ceolin <flavio.ceolin@intel.com>
Co-authored-by: Tomasz Leman <tomasz.m.leman@intel.com>
Co-authored-by: Bonislawski Adrian <adrian.bonislawski@intel.com>
Co-authored-by: Serhiy Katsyuba <serhiy.katsyuba@intel.com>
Co-authored-by: Andrey Borisovich <andrey.borisovich@intel.com>
This commit is contained in:
Anas Nashif 2022-01-28 15:19:19 -08:00
parent 252f4052a6
commit b330a05539
20 changed files with 2068 additions and 2 deletions

View file

@ -46,6 +46,8 @@ source "drivers/dma/Kconfig.iproc_pax"
source "drivers/dma/Kconfig.cavs_gpdma"
source "drivers/dma/Kconfig.ace_gpdma"
source "drivers/dma/Kconfig.cavs_hda"
endif # DMA

View file

@ -0,0 +1,10 @@
# DMA configuration options
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
config DMA_ACE_GPDMA
bool "ACE General Purpose Direct Memory Access driver"
default $(dt_compat_enabled,$(DT_COMPAT_DMA_ACE_GPDMA))
help
Intel ACE DMA driver.

View file

@ -0,0 +1,295 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <xtensa/xtensa.dtsi>
#include <mem.h>
/ {
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx7";
reg = <0>;
cpu-power-states = <&idle &suspend &off>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx7";
reg = <1>;
cpu-power-states = <&idle &suspend &off>;
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx7";
reg = <2>;
cpu-power-states = <&idle &suspend &off>;
};
};
power-states {
idle: idle {
compatible = "zephyr,power-state";
power-state-name = "runtime-idle";
min-residency-us = <0>;
exit-latency-us = <0>;
};
suspend: suspend {
compatible = "zephyr,power-state";
power-state-name = "suspend-to-idle";
min-residency-us = <200>;
exit-latency-us = <100>;
};
/* PM_STATE_SOFT_OFF can be entered only by calling pm_state_force.
* The procedure is triggered by IPC from the HOST (SET_DX).
*/
off: off {
compatible = "zephyr,power-state";
power-state-name = "soft-off";
min-residency-us = <2147483647>;
exit-latency-us = <0>;
};
};
sram0: memory@a0020000 {
device_type = "memory";
compatible = "mmio-sram";
reg = <0xa0020000 DT_SIZE_K(2816)>;
};
sram1: memory@a0000000 {
device_type = "memory";
compatible = "mmio-sram";
reg = <0xa0000000 DT_SIZE_K(64)>;
};
sysclk: system-clock {
compatible = "fixed-clock";
clock-frequency = <38400000>;
#clock-cells = <0>;
};
audioclk: audio-clock {
compatible = "fixed-clock";
clock-frequency = <24576000>;
#clock-cells = <0>;
};
pllclk: pll-clock {
compatible = "fixed-clock";
clock-frequency = <96000000>;
#clock-cells = <0>;
};
IMR1: memory@A1000000 {
compatible = "intel,adsp-imr";
reg = <0xA1000000 DT_SIZE_M(16)>;
block-size = <0x1000>;
zephyr,memory-region = "IMR1";
};
soc {
core_intc: core_intc@0 {
compatible = "cdns,xtensa-core-intc";
reg = <0x00 0x400>;
interrupt-controller;
#interrupt-cells = <3>;
};
/* This is actually an array of per-core designware
* controllers, but the special setup and extra
* masking layer makes it easier for MTL to handle
* this internally.
*/
ace_intc: ace_intc@7ac00 {
compatible = "intel,ace-intc";
reg = <0x7ac00 0xc00>;
interrupt-controller;
#interrupt-cells = <3>;
interrupts = <4 0 0>;
num-irqs = <28>;
interrupt-parent = <&core_intc>;
label = "ACE_0";
};
shim: shim@71f00 {
compatible = "intel,cavs-shim";
reg = <0x71f00 0x100>;
};
sspbase: ssp_base@28800 {
compatible = "intel,ssp-sspbase";
reg = <0x28800 0x1000>;
};
win: win@70200 {
compatible = "intel,cavs-win";
reg = <0x70200 0x30>;
};
tlb: tlb@17e000 {
compatible = "intel,adsp-tlb";
reg = <0x17e000 0x1000>;
};
lpgpdma0: dma@7c000 {
compatible = "intel,ace-gpdma";
#dma-cells = <1>;
reg = <0x0007c000 0x1000>;
shim = <0x0007c800 0x1000>;
interrupts = <0x10 0 0>;
interrupt-parent = <&core_intc>;
label = "DMA_0";
status = "okay";
};
lpgpdma1: dma@7d000 {
compatible = "intel,ace-gpdma";
#dma-cells = <1>;
reg = <0x0007d000 0x1000>;
shim = <0x0007d800 0x1000>;
interrupts = <0x20 0 0>;
interrupt-parent = <&core_intc>;
label = "DMA_1";
status = "okay";
};
ssp0:ssp@28000 {
compatible = "intel,ssp-dai";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x00028000 0x1000
0x00079C00 0x200>;
interrupts = <0x00 0 0>;
interrupt-parent = <&ace_intc>;
dmas = <&lpgpdma0 2
&lpgpdma0 3>;
dma-names = "tx", "rx";
label = "SSP_0";
status = "okay";
};
ssp1:ssp@29000 {
compatible = "intel,ssp-dai";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x00029000 0x1000
0x00079C00 0x200>;
interrupts = <0x01 0 0>;
interrupt-parent = <&ace_intc>;
dmas = <&lpgpdma0 4
&lpgpdma0 5>;
dma-names = "tx", "rx";
label = "SSP_1";
status = "okay";
};
ssp2:ssp@2a000 {
compatible = "intel,ssp-dai";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0002a000 0x1000
0x00079C00 0x200>;
interrupts = <0x02 0 0>;
interrupt-parent = <&ace_intc>;
dmas = <&lpgpdma0 6
&lpgpdma0 7>;
dma-names = "tx", "rx";
label = "SSP_2";
status = "okay";
};
ssp3:ssp@2b000 {
compatible = "intel,ssp-dai";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0002b000 0x1000
0x00079C00 0x200>;
interrupts = <0x03 0 0>;
interrupt-parent = <&ace_intc>;
dmas = <&lpgpdma0 8
&lpgpdma0 9>;
dma-names = "tx", "rx";
label = "SSP_3";
status = "okay";
};
ssp4:ssp@2c000 {
compatible = "intel,ssp-dai";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0002c000 0x1000
0x00079C00 0x200>;
interrupts = <0x04 0 0>;
interrupt-parent = <&ace_intc>;
dmas = <&lpgpdma0 10
&lpgpdma0 11>;
dma-names = "tx", "rx";
label = "SSP_4";
status = "okay";
};
ssp5:ssp@2d000 {
	compatible = "intel,ssp-dai";
	#address-cells = <1>;
	#size-cells = <0>;
	reg = <0x0002d000 0x1000
	       0x00079C00 0x200>;
	/* Fix: was <0x04 0 0>, which duplicated ssp4's interrupt input.
	 * Every SSPn node in this file uses ace_intc input n
	 * (ssp0=0x00 ... ssp4=0x04), so ssp5 belongs on input 0x05.
	 */
	interrupts = <0x05 0 0>;
	interrupt-parent = <&ace_intc>;
	dmas = <&lpgpdma0 12
		&lpgpdma0 13>;
	dma-names = "tx", "rx";
	label = "SSP_5";
	status = "okay";
};
hda_host_out: dma@72800 {
compatible = "intel,cavs-hda-host-out";
#dma-cells = <1>;
reg = <0x00072800 0x40>;
dma-channels = <9>;
dma-buf-alignment = <128>;
label = "HDA_HOST_OUT";
status = "okay";
};
hda_host_in: dma@72c00 {
compatible = "intel,cavs-hda-host-in";
#dma-cells = <1>;
reg = <0x00072c00 0x40>;
dma-channels = <10>;
dma-buf-alignment = <128>;
label = "HDA_HOST_IN";
status = "okay";
};
hda_link_out: dma@72400 {
compatible = "intel,cavs-hda-link-out";
#dma-cells = <1>;
reg = <0x00072400 0x40>;
dma-channels = <9>;
dma-buf-alignment = <128>;
label = "HDA_LINK_OUT";
status = "okay";
};
hda_link_in: dma@72600 {
compatible = "intel,cavs-hda-link-in";
#dma-cells = <1>;
reg = <0x00072600 0x40>;
dma-channels = <10>;
dma-buf-alignment = <128>;
label = "HDA_LINK_IN";
status = "okay";
};
};
};

View file

@ -1,7 +1,9 @@
# Intel CAVS SoC family CMake file
# Intel ADSP SoC family CMake file
#
# Copyright (c) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
add_subdirectory(common)
if(CONFIG_SOC_SERIES_INTEL_ACE1X)
add_subdirectory(ace_v1x)
endif()

View file

@ -0,0 +1,13 @@
# Intel ACE SoC family CMake file
#
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
zephyr_library_sources(
soc.c
multiprocessing.c
irq.c
boot.c
rimage_modules.c
power_down.S
)

View file

@ -0,0 +1,78 @@
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
if SOC_SERIES_INTEL_ACE1X
config SOC_SERIES
string
default "ace_v1x"
config SOC_TOOLCHAIN_NAME
string
default "intel_s1000"
config SOC
string
default "intel_ace15_mtpm"
config IMR_MANIFEST_ADDR
default 0xa1042000
config MP_NUM_CPUS
default 3
config SMP
default y
# MTL leaves the upper mapping in the same spot as cAVS, but moves the
# lower one inexplicably.
config XTENSA_UNCACHED_REGION
default 2
# Parameters for gen_isr_tables.py:
config 2ND_LVL_INTR_00_OFFSET
default 4
config MULTI_LEVEL_INTERRUPTS
default y
config MAX_IRQ_PER_AGGREGATOR
default 29
config NUM_2ND_LEVEL_AGGREGATORS
default 1
config 2ND_LVL_ISR_TBL_OFFSET
default 9
config 2ND_LEVEL_INTERRUPTS
default y
config XTENSA_TIMER
default n
config XTENSA_TIMER_ID
default 0
config MTL_TIMER
default y
config SYS_CLOCK_HW_CYCLES_PER_SEC
default 400000000 if XTENSA_TIMER
default 19200000 if MTL_TIMER
config SYS_CLOCK_TICKS_PER_SEC
default 50000
config DYNAMIC_INTERRUPTS
default y
if LOG
config LOG_BACKEND_ADSP
default y
endif # LOG
endif # SOC_SERIES_INTEL_ACE1X

View file

@ -0,0 +1,14 @@
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
config SOC_SERIES_INTEL_ACE1X
bool "Intel ACE 1.x"
select SOC_FAMILY_INTEL_ADSP
select XTENSA
select XTENSA_HAL if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xcc"
select ATOMIC_OPERATIONS_BUILTIN if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xcc"
select ARCH_HAS_COHERENCE
select SCHED_IPI_SUPPORTED
select DW_ICTL_ACE_V1X
help
Intel ACE 1.x

View file

@ -0,0 +1,11 @@
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
choice
prompt "Intel ADSP SoC Selection"
config SOC_INTEL_ACE15_MTPM
bool "ACE 1.5 Meteor PCH M"
depends on SOC_SERIES_INTEL_ACE1X
endchoice

View file

@ -0,0 +1,140 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
/*
* THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* Functions here are designed to produce efficient code to
* search an Xtensa bitmask of interrupts, inspecting only those bits
* declared to be associated with a given interrupt level. Each
* dispatcher will handle exactly one flagged interrupt, in numerical
* order (low bits first) and will return a mask of that bit that can
* then be cleared by the calling code. Unrecognized bits for the
* level will invoke an error handler.
*/
#include <xtensa/config/core-isa.h>
#include <sys/util.h>
#include <sw_isr_table.h>
#if !defined(XCHAL_INT0_LEVEL) || XCHAL_INT0_LEVEL != 1
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT1_LEVEL) || XCHAL_INT1_LEVEL != 1
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT2_LEVEL) || XCHAL_INT2_LEVEL != 2
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT3_LEVEL) || XCHAL_INT3_LEVEL != 2
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT4_LEVEL) || XCHAL_INT4_LEVEL != 2
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT5_LEVEL) || XCHAL_INT5_LEVEL != 3
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT6_LEVEL) || XCHAL_INT6_LEVEL != 3
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT7_LEVEL) || XCHAL_INT7_LEVEL != 3
#error core-isa.h interrupt level does not match dispatcher!
#endif
#if !defined(XCHAL_INT8_LEVEL) || XCHAL_INT8_LEVEL != 5
#error core-isa.h interrupt level does not match dispatcher!
#endif
/* Dispatch exactly one pending level-1 interrupt from @mask, lowest bit
 * first (level-1 sources are IRQs 0-1 per the XCHAL checks above).
 * Returns the single bit that was handled so the caller can clear it,
 * or 0 if no level-1 source is flagged.
 */
static inline int _xtensa_handle_one_int1(unsigned int mask)
{
	int irq;

	if (mask & BIT(0)) {
		mask = BIT(0);
		irq = 0;
		goto handle_irq;
	}
	if (mask & BIT(1)) {
		mask = BIT(1);
		irq = 1;
		goto handle_irq;
	}
	return 0;
handle_irq:
	/* Invoke the registered handler from the generated SW ISR table */
	_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
	return mask;
}
/* Dispatch exactly one pending level-2 interrupt from @mask, lowest bit
 * first (level-2 sources are IRQs 2-4 per the XCHAL checks above).
 * Returns the handled bit for the caller to clear, or 0 if none pending.
 */
static inline int _xtensa_handle_one_int2(unsigned int mask)
{
	int irq;

	if (mask & BIT(2)) {
		mask = BIT(2);
		irq = 2;
		goto handle_irq;
	}
	if (mask & BIT(3)) {
		mask = BIT(3);
		irq = 3;
		goto handle_irq;
	}
	if (mask & BIT(4)) {
		mask = BIT(4);
		irq = 4;
		goto handle_irq;
	}
	return 0;
handle_irq:
	/* Invoke the registered handler from the generated SW ISR table */
	_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
	return mask;
}
/* Dispatch exactly one pending level-3 interrupt from @mask, lowest bit
 * first (level-3 sources are IRQs 5-7 per the XCHAL checks above).
 * Returns the handled bit for the caller to clear, or 0 if none pending.
 */
static inline int _xtensa_handle_one_int3(unsigned int mask)
{
	int irq;

	if (mask & BIT(5)) {
		mask = BIT(5);
		irq = 5;
		goto handle_irq;
	}
	if (mask & BIT(6)) {
		mask = BIT(6);
		irq = 6;
		goto handle_irq;
	}
	if (mask & BIT(7)) {
		mask = BIT(7);
		irq = 7;
		goto handle_irq;
	}
	return 0;
handle_irq:
	/* Invoke the registered handler from the generated SW ISR table */
	_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
	return mask;
}
/* Dispatch the single level-5 interrupt source (IRQ 8 per the XCHAL
 * check above). Returns the handled bit for the caller to clear, or 0
 * if it is not flagged in @mask.
 */
static inline int _xtensa_handle_one_int5(unsigned int mask)
{
	int irq;

	if (mask & BIT(8)) {
		mask = BIT(8);
		irq = 8;
		goto handle_irq;
	}
	return 0;
handle_irq:
	/* Invoke the registered handler from the generated SW ISR table */
	_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
	return mask;
}
/* No level-0 interrupt sources exist on this core; @mask is
 * intentionally ignored and 0 ("nothing handled") is returned.
 */
static inline int _xtensa_handle_one_int0(unsigned int mask)
{
	return 0;
}
/* No level-4 interrupt sources exist on this core; @mask is
 * intentionally ignored and 0 ("nothing handled") is returned.
 */
static inline int _xtensa_handle_one_int4(unsigned int mask)
{
	return 0;
}

View file

@ -0,0 +1,62 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_SOC_INTEL_ADSP_ACE_IPC_REGS_H
#define ZEPHYR_SOC_INTEL_ADSP_ACE_IPC_REGS_H
/**
* @remark Inter Processor Communication:
* Used for sending interrupts to and receiving them from another
* device. ACE uses it to talk to the host and the CSME. In general
* there is one of these blocks instantiated for each endpoint of a
* connection.
*/
struct cavs_ipc {
#ifdef CONFIG_SOC_SERIES_INTEL_ACE1X
	/* NOTE(review): field roles assumed from names and the CAVS_IPC_*
	 * macros below — tdr/tda look like the target-side doorbell
	 * request/ack pair and idr/ida the initiator-side pair, with
	 * tdd/idd as the extended payload registers. Confirm against the
	 * ACE register specification before relying on this.
	 */
	uint32_t tdr;
	uint32_t tda;
	uint32_t unused0[2];	/* reserved; preserves hardware offsets */
	uint32_t idr;
	uint32_t ida;
	uint32_t unused1[2];	/* reserved; preserves hardware offsets */
	uint32_t cst;
	uint32_t csr;
	uint32_t ctl;		/* interrupt enables: CAVS_IPC_CTL_TBIE/IDIE */
	uint32_t cap;
	uint32_t unused2[52];	/* payload regs sit far from the doorbells on ACE */
	uint32_t tdd;
	uint32_t unused3[31];
	uint32_t idd;
#endif
};
#define CAVS_IPC_BUSY BIT(31)
#define CAVS_IPC_DONE BIT(31)
#define CAVS_IPC_CTL_TBIE BIT(0)
#define CAVS_IPC_CTL_IDIE BIT(1)
/**
* @remark MTL provides an array of six IPC endpoints to be used for
* peer-to-peer communication between DSP cores. This is organized as
* two "agents" (A and B) each wired to its own interrupt. Each agent,
* has three numbered endpoints (0,1,2), each of which is paired with
* the equivalent endpoint on the other agent.
* So a given endpoint on an agent (A=0/B=1) can be found via:
* MTL_P2P_IPC[endpoint].agents[agent].ipc
*/
struct mtl_p2p_ipc {
	union {
		int8_t unused[512];	/* each agent occupies a fixed 512-byte window */
		struct cavs_ipc ipc;	/* endpoint registers at the window base */
	} agents[2];	/* index 0 = agent A, 1 = agent B */
};
/**
* @brief This register is for intra DSP communication, among multiple Tensilica Cores.
* @todo Move to Devicetree.
*/
#define IDCC_REG 0x70400
#define MTL_P2P_IPC ((volatile struct mtl_p2p_ipc *)IDCC_REG)
#endif /* ZEPHYR_SOC_INTEL_ADSP_ACE_IPC_REGS_H */

View file

@ -0,0 +1,507 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Linker command/script file
*
* Linker script for the Intel ADSP ACE 1.x (Meteor Lake) platform
*/
OUTPUT_ARCH(xtensa)
#include <zephyr/devicetree.h>
#include <xtensa/config/core-isa.h>
#include <zephyr/linker/sections.h>
#include <cavs-vectors.h>
#include <cavs-mem.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>
ENTRY(rom_entry);
/* DSP RAM regions (all of them) are mapped twice on the DSP. One
* mapping is set up to bypass the L1 cache, so it must be used when
* multiprocessor coherence is desired, where the latter mapping is
* best used for processor-local data (e.g. stacks) or shared data
* that is managed with explicit cache flush/invalidate operations.
*
* These macros will set up a segment start address correctly,
* including alignment to a cache line. Be sure to also emit the
* section to ">ram" or ">ucram" as appropriate, to prevent the linker
* from filling in 512MB of sparse zeros.
*/
#ifdef CONFIG_KERNEL_COHERENCE
#define RPO_SET(addr, reg) ((addr & 0x1fffffff) | (reg << 29))
#define SEGSTART_CACHED RPO_SET(ALIGN(64), CONFIG_XTENSA_CACHED_REGION)
#define SEGSTART_UNCACHED RPO_SET(ALIGN(64), CONFIG_XTENSA_UNCACHED_REGION)
#else
#define SEGSTART_CACHED .
#define SEGSTART_UNCACHED .
#define ucram ram
#endif
/* intlist.ld needs an IDT_LIST memory region */
#define IDT_BASE 0xe0000000
#define IDT_SIZE 0x2000
/* rimage module sections are C struct data, and thus flagged ALLOC.
* The xcc linker demands they be in a declared memory region even if
* the enclosing output section is (NOLOAD). Put them here.
*/
#define NOLOAD_BASE 0x20000
#define NOLOAD_SIZE 0x100000
MEMORY {
vector_base_text :
org = XCHAL_VECBASE_RESET_PADDR_SRAM,
len = MEM_VECBASE_LIT_SIZE
vector_int2_lit :
org = XCHAL_INTLEVEL2_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_int2_text :
org = XCHAL_INTLEVEL2_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_int3_lit :
org = XCHAL_INTLEVEL3_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_int3_text :
org = XCHAL_INTLEVEL3_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_int4_lit :
org = XCHAL_INTLEVEL4_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_int4_text :
org = XCHAL_INTLEVEL4_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_int5_lit :
org = XCHAL_INTLEVEL5_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_int5_text :
org = XCHAL_INTLEVEL5_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_int6_lit :
org = XCHAL_INTLEVEL6_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_int6_text :
org = XCHAL_INTLEVEL6_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_int7_lit :
org = XCHAL_INTLEVEL7_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_int7_text :
org = XCHAL_INTLEVEL7_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_kernel_lit :
org = XCHAL_KERNEL_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_kernel_text :
org = XCHAL_KERNEL_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_user_lit :
org = XCHAL_USER_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_user_text :
org = XCHAL_USER_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
vector_double_lit :
org = XCHAL_DOUBLEEXC_VECTOR_PADDR_SRAM - MEM_VECT_LIT_SIZE,
len = MEM_VECT_LIT_SIZE
vector_double_text :
org = XCHAL_DOUBLEEXC_VECTOR_PADDR_SRAM,
len = MEM_VECT_TEXT_SIZE
imr :
org = IMR_BOOT_LDR_TEXT_ENTRY_BASE,
len = 0x100000
ram :
org = RAM_BASE,
len = RAM_SIZE
#ifdef CONFIG_KERNEL_COHERENCE
ucram :
org = RPO_SET(RAM_BASE, CONFIG_XTENSA_UNCACHED_REGION),
len = RAM_SIZE
#endif
#ifdef CONFIG_GEN_ISR_TABLES
IDT_LIST :
org = IDT_BASE,
len = IDT_SIZE
#endif
lpram :
org = LP_SRAM_BASE,
len = LP_SRAM_SIZE
noload :
org = NOLOAD_BASE,
len = NOLOAD_SIZE
}
SECTIONS {
/* Boot loader code in IMR memory */
.imr : {
_imr_start = .;
/* Entry point MUST be here per external configuration */
KEEP (*(.boot_entry.text))
*(.imr .imr.*)
} >imr
/* Boot loader data. Note that rimage seems to want this
* page-aligned or it will throw an error, not sure why since all
* the ROM cares about is a contiguous region. And it's
* particularly infuriating as it precludes linker .rodata next to
* .text.
*/
.imrdata : ALIGN(4096) {
*(.imrdata .imrdata.*)
_imr_end = .;
} >imr
.WindowVectors.text : {
_WindowVectors_text_start = .;
KEEP (*(.WindowVectors.text))
_WindowVectors_text_end = .;
} >vector_base_text
.Level2InterruptVector.literal : {
_Level2InterruptVector_literal_start = .;
*(.Level2InterruptVector.literal)
_Level2InterruptVector_literal_end = .;
} >vector_int2_lit
.Level2InterruptVector.text : {
_Level2InterruptVector_text_start = .;
KEEP (*(.Level2InterruptVector.text))
_Level2InterruptVector_text_end = .;
} >vector_int2_text
.Level3InterruptVector.literal : {
_Level3InterruptVector_literal_start = .;
*(.Level3InterruptVector.literal)
_Level3InterruptVector_literal_end = .;
} >vector_int3_lit
.Level3InterruptVector.text : {
_Level3InterruptVector_text_start = .;
KEEP (*(.Level3InterruptVector.text))
_Level3InterruptVector_text_end = .;
} >vector_int3_text
.Level4InterruptVector.literal : {
_Level4InterruptVector_literal_start = .;
*(.Level4InterruptVector.literal)
_Level4InterruptVector_literal_end = .;
} >vector_int4_lit
.Level4InterruptVector.text : {
_Level4InterruptVector_text_start = .;
KEEP (*(.Level4InterruptVector.text))
_Level4InterruptVector_text_end = .;
} >vector_int4_text
.Level5InterruptVector.literal : {
_Level5InterruptVector_literal_start = .;
*(.Level5InterruptVector.literal)
_Level5InterruptVector_literal_end = .;
} >vector_int5_lit
.Level5InterruptVector.text : {
_Level5InterruptVector_text_start = .;
KEEP (*(.Level5InterruptVector.text))
_Level5InterruptVector_text_end = .;
} >vector_int5_text
.DebugExceptionVector.literal : {
_DebugExceptionVector_literal_start = .;
*(.DebugExceptionVector.literal)
_DebugExceptionVector_literal_end = .;
} >vector_int6_lit
.DebugExceptionVector.text : {
_DebugExceptionVector_text_start = .;
KEEP (*(.DebugExceptionVector.text))
_DebugExceptionVector_text_end = .;
} >vector_int6_text
.NMIExceptionVector.literal : {
_NMIExceptionVector_literal_start = .;
*(.NMIExceptionVector.literal)
_NMIExceptionVector_literal_end = .;
} >vector_int7_lit
.NMIExceptionVector.text : {
_NMIExceptionVector_text_start = .;
KEEP (*(.NMIExceptionVector.text))
_NMIExceptionVector_text_end = .;
} >vector_int7_text
.KernelExceptionVector.literal : {
_KernelExceptionVector_literal_start = .;
*(.KernelExceptionVector.literal)
_KernelExceptionVector_literal_end = .;
} >vector_kernel_lit
.KernelExceptionVector.text : {
_KernelExceptionVector_text_start = .;
KEEP (*(.KernelExceptionVector.text))
_KernelExceptionVector_text_end = .;
} >vector_kernel_text
.UserExceptionVector.literal : {
_UserExceptionVector_literal_start = .;
*(.UserExceptionVector.literal)
_UserExceptionVector_literal_end = .;
} >vector_user_lit
.UserExceptionVector.text : {
_UserExceptionVector_text_start = .;
KEEP (*(.UserExceptionVector.text))
_UserExceptionVector_text_end = .;
} >vector_user_text
.DoubleExceptionVector.literal : {
_DoubleExceptionVector_literal_start = .;
*(.DoubleExceptionVector.literal)
_DoubleExceptionVector_literal_end = .;
} >vector_double_lit
.DoubleExceptionVector.text : {
_DoubleExceptionVector_text_start = .;
KEEP (*(.DoubleExceptionVector.text))
_DoubleExceptionVector_text_end = .;
} >vector_double_text
.text : {
_text_start = .;
*(.iram1 .iram1.*)
*(.entry.text)
*(.init.literal)
*(.iram0.text)
KEEP(*(.init))
KEEP(*(.lps_vector))
*(.literal .text .literal.* .text.* .stub .gnu.warning .gnu.linkonce.literal.* .gnu.linkonce.t.*.literal .gnu.linkonce.t.*)
*(.fini.literal)
KEEP(*(.fini))
*(.gnu.version)
_text_end = .;
} >ram
.rodata : ALIGN(4096)
{
_rodata_start = .;
*(.rodata)
*(.rodata.*)
*(.gnu.linkonce.r.*)
*(.rodata1)
. = ALIGN(4);
#include <snippets-rodata.ld>
__XT_EXCEPTION_TABLE__ = .;
KEEP (*(.xt_except_table))
KEEP (*(.gcc_except_table .gcc_except_table.*))
*(.gnu.linkonce.e.*)
*(.gnu.version_r)
KEEP (*(.eh_frame))
KEEP (*crtbegin.o(.ctors))
KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
KEEP (*crtbegin.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
__XT_EXCEPTION_DESCS__ = .;
*(.xt_except_desc)
*(.gnu.linkonce.h.*)
__XT_EXCEPTION_DESCS_END__ = .;
*(.xt_except_desc_end)
*(.dynamic)
*(.gnu.version_d)
_bss_table_start = .;
LONG(_bss_start)
LONG(_bss_end)
_bss_table_end = .;
_rodata_end = .;
} >ram
.module_init : {
_module_init_start = .;
*(*.initcall)
_module_init_end = .;
} >ram
#define RAMABLE_REGION ram
#define ROMABLE_REGION ram
#include <zephyr/linker/common-rom.ld>
.fw_ready : {
KEEP(*(".fw_ready"));
KEEP (*(.fw_ready_metadata))
} >ram
.noinit SEGSTART_UNCACHED : {
*(.noinit)
*(.noinit.*)
} >ucram
.data SEGSTART_UNCACHED : {
_data_start = .;
*(.data)
*(.data.*)
*(.gnu.linkonce.d.*)
KEEP(*(.gnu.linkonce.d.*personality*))
*(.data1)
*(.sdata)
*(.sdata.*)
*(.gnu.linkonce.s.*)
*(.sdata2)
*(.sdata2.*)
*(.gnu.linkonce.s2.*)
KEEP(*(.jcr))
_trace_ctx_start = ABSOLUTE(.);
*(.trace_ctx)
_trace_ctx_end = ABSOLUTE(.);
*(.gna_model)
_data_end = .;
} >ucram
.lit4 SEGSTART_CACHED : {
_lit4_start = .;
*(*.lit4)
*(.lit4.*)
*(.gnu.linkonce.lit4.*)
_lit4_end = .;
} >ram
/* These values need to change in our scheme, where the common-ram
* sections need to be linked in safe/uncached memory but common-rom
* wants to use the cache
*/
. = SEGSTART_UNCACHED;
#undef RAMABLE_REGION
#undef ROMABLE_REGION
#define RAMABLE_REGION ucram
#define ROMABLE_REGION ucram
#include <zephyr/linker/common-ram.ld>
.tm_clone_table : {
*(.tm_clone_table)
} >ram
/* This section is cached. By default it contains only declared
* thread stacks, but applications can put symbols here too.
*/
.cached SEGSTART_CACHED : {
_cached_start = .;
*(.cached .cached.*)
_cached_end = .;
} >ram
/* Rimage requires 4k alignment between "DATA" and "BSS", can't do
* this in the section declaration below because we're also changing
* cacheability and that leaves a gap in the image large enough for
* binutils to decide to warn about (no way to turn that off, it
* seems, --warn-section-align is on by default)
*/
. = ALIGN(4096);
.bss SEGSTART_UNCACHED (NOLOAD) :
{
_bss_start = .;
*(.dynsbss)
*(.sbss)
*(.sbss.*)
*(.gnu.linkonce.sb.*)
*(.scommon)
*(.sbss2)
*(.sbss2.*)
*(.gnu.linkonce.sb2.*)
*(.dynbss)
*(.bss)
*(.bss.*)
*(.gnu.linkonce.b.*)
*(COMMON)
. = ALIGN(8);
_bss_end = .;
} >ucram
. = SEGSTART_UNCACHED;
_end = ALIGN(8);
/* Heap start and end markers. Used to reserve system heap memory. */
.heap_mem SEGSTART_UNCACHED (NOLOAD) :
{
_heap_start = .;
*(.heap_mem)
_heap_end = .;
} >ucram
.unused_ram_start_marker SEGSTART_CACHED (NOLOAD) :
{
. = ALIGN(4096);
_unused_ram_start_marker = .;
*(.unused_ram_start_marker)
*(.unused_ram_start_marker.*)
} >ram
. = L2_SRAM_BASE + L2_SRAM_SIZE;
. = SEGSTART_UNCACHED;
_heap_sentry = .;
/* dma buffers */
.lpbuf (NOLOAD): {
_dma_buf_start = .;
*(.dma_buffers)
_dma_buf_end = .;
} >lpram
/* Non-loadable sections below. Back to cached memory so
* the cache remap script doesn't try to move them around needlessly.
*/
. = SEGSTART_CACHED;
/* rimage module manifest headers */
.module.boot : { KEEP(*(.module.boot)) } >noload
.module.main : { KEEP(*(.module.main)) } >noload
.static_uuid_entries : {
*(*.static_uuids)
} >noload
.static_log_entries : {
*(*.static_log*)
} >noload
/* This is the "extended manifest" data (mostly versioning stuff)
* emitted by SOF and inspected by the kernel driver. It doesn't
* appear directly in the image, but rimage will parse and repack
* this into the output file header, so requires this be present
* even if empty. Alignment and padding to 16 bytes is required,
* otherwise rimage will complain about the size being wrong (which
* sounds like a struct should be declared packed somewhere...)
*/
.fw_metadata : ALIGN(16) {
KEEP (*(.fw_metadata))
. = ALIGN(16);
} >noload
#include <zephyr/linker/debug-sections.ld>
.xtensa.info 0 : { *(.xtensa.info) }
.xt.insn 0 : {
KEEP (*(.xt.insn))
KEEP (*(.gnu.linkonce.x.*))
}
.xt.prop 0 : {
KEEP (*(.xt.prop))
KEEP (*(.xt.prop.*))
KEEP (*(.gnu.linkonce.prop.*))
}
.xt.lit 0 : {
KEEP (*(.xt.lit))
KEEP (*(.xt.lit.*))
KEEP (*(.gnu.linkonce.p.*))
}
.xt.profile_range 0 : {
KEEP (*(.xt.profile_range))
KEEP (*(.gnu.linkonce.profile_range.*))
}
.xt.profile_ranges 0 : {
KEEP (*(.xt.profile_ranges))
KEEP (*(.gnu.linkonce.xt.profile_ranges.*))
}
.xt.profile_files 0 : {
KEEP (*(.xt.profile_files))
KEEP (*(.gnu.linkonce.xt.profile_files.*))
}
#ifdef CONFIG_GEN_ISR_TABLES
#include <zephyr/linker/intlist.ld>
#endif
}

View file

@ -0,0 +1,222 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _ZEPHYR_SOC_INTEL_ADSP_ACE_V1X_REGS_H_
#define _ZEPHYR_SOC_INTEL_ADSP_ACE_V1X_REGS_H_
#include <stdint.h>
#include <xtensa/config/core-isa.h>
/* Core power and boot control block */
/* One capability/control pair and one boot-control bank per DSP core
 * (three cores on ACE 1.x).
 */
struct mtl_pwrboot {
	struct {
		uint32_t cap;	/* capability; CPA status bit — see MTL_PWRBOOT_CTL_CPA */
		uint32_t ctl;	/* control; SPA request bit — see MTL_PWRBOOT_CTL_SPA */
	} capctl[3];		/* per-core power capability/control */
	uint32_t unused0[10];	/* reserved; preserves hardware offsets */
	struct {
		uint32_t brcap;		/* boot/ROM capability — TODO confirm against spec */
		uint32_t wdtcs;		/* presumably watchdog control/status — verify */
		uint32_t wdtipptr;	/* presumably watchdog IP pointer — verify */
		uint32_t unused1;
		uint32_t bctl;		/* boot control — see MTL_PWRBOOT_BCTL_* bits */
		uint32_t baddr;		/* boot address — TODO confirm semantics */
		uint32_t battr;		/* boot attributes — TODO confirm semantics */
		uint32_t unused2;
	} bootctl[3];		/* per-core boot control bank */
};
#define MTL_PWRBOOT_CTL_SPA BIT(0)
#define MTL_PWRBOOT_CTL_CPA BIT(8)
#define MTL_PWRBOOT_BCTL_BYPROM BIT(0)
#define MTL_PWRBOOT_BCTL_WAITIPCG BIT(16)
#define MTL_PWRBOOT_BCTL_WAITIPPG BIT(17)
#define MTL_PWRBOOT (*(volatile struct mtl_pwrboot *)0x00178d00)
/* A 64-bit hardware counter exposed as two 32-bit MMIO words. */
struct clk64 {
	uint32_t lo;	/* low 32 bits */
	uint32_t hi;	/* high 32 bits */
};
/* Timers & Time Stamping register block */
struct mtl_tts {
	uint32_t ttscap;	/* capabilities register */
	uint32_t unused0;	/* reserved; preserves hardware offsets */
	struct clk64 rtcwc;	/* presumably the RTC wall-clock counter — verify */
	uint16_t wcctl;		/* wall clock control */
	uint16_t wcsts;		/* wall clock status */
	uint32_t unused1;
	struct clk64 wcav;	/* assumed wall-clock audio value — TODO confirm */
	struct clk64 wc;	/* wall clock counter */
	uint32_t wctcs;		/* wall clock timer comparator control/status */
	uint32_t unused2;
	struct clk64 wctc[2];	/* two wall-clock timer comparators */
};
/* FIXME: devicetree */
#define MTL_TTS (*(volatile struct mtl_tts *)0x72000)
/* Low priority interrupt indices */
#define MTL_INTL_HIPC 0
#define MTL_INTL_SBIPC 1
#define MTL_INTL_ML 2
#define MTL_INTL_IDCA 3
#define MTL_INTL_LPVML 4
#define MTL_INTL_SHA 5
#define MTL_INTL_L1L2M 6
#define MTL_INTL_I2S 7
#define MTL_INTL_DMIC 8
#define MTL_INTL_SNDW 9
#define MTL_INTL_TTS 10
#define MTL_INTL_WDT 11
#define MTL_INTL_HDAHIDMA 12
#define MTL_INTL_HDAHODMA 13
#define MTL_INTL_HDALIDMA 14
#define MTL_INTL_HDALODMA 15
#define MTL_INTL_I3C 16
#define MTL_INTL_GPDMA 17
#define MTL_INTL_PWM 18
#define MTL_INTL_I2C 19
#define MTL_INTL_SPI 20
#define MTL_INTL_UART 21
#define MTL_INTL_GPIO 22
#define MTL_INTL_UAOL 23
#define MTL_INTL_IDCB 24
#define MTL_INTL_DCW 25
#define MTL_INTL_DTF 26
#define MTL_INTL_FLV 27
#define MTL_INTL_DPDMA 28
/* Device interrupt control for the low priority interrupts. It
* provides per-core masking and status checking: MTL_DINT is an array
* of these structs, one per core. The state is in the bottom bits,
* indexed by MTL_INTL_*. Note that some of these use more than one
* bit to discriminate sources (e.g. TTS's bits 0-2 are
* timestamp/comparator0/comparator1). It seems safe to write all 1's
* to the short to "just enable everything", but drivers should
* probably implement proper logic.
*
* Note that this block is independent of the Designware controller
* that manages the shared IRQ. Interrupts need to be unmasked in both
* in order to be delivered to software. Per simulator source code,
* this is "upstream" of DW: an interrupt will not be latched into the
* status registers of the DW controller unless the IE bits here are
* set. That seems unlikely to correctly capture the hardware
* behavior (it would mean that the DW controller was being
* independently triggered multiple times by each core!). Beware.
*
* Enable an interrupt for a core with e.g.:
*
* MTL_DINT[core_id].ie[MTL_INTL_TTS] = 0xffff;
*/
struct mtl_dint {
	uint16_t ie[32]; /* enable, indexed by MTL_INTL_* */
	uint16_t is[32]; /* status (potentially masked by ie) */
	uint16_t irs[32]; /* "raw" status (hardware input state) */
	uint32_t unused[16]; /* pads each per-core instance to 256 bytes */
};
/* FIXME: devicetree */
#define MTL_DINT ((volatile struct mtl_dint *)0x78840)
/* Convert between IRQ_CONNECT() numbers and MTL_INTL_* interrupts */
#define MTL_IRQ_TO_ZEPHYR(n) (XCHAL_NUM_INTERRUPTS + (n))
#define MTL_IRQ_FROM_ZEPHYR(n) ((n) - XCHAL_NUM_INTERRUPTS)
/* MTL also has per-core instantiations of a Synopsys interrupt
* controller. These inputs (with the same indices as MTL_INTL_*
* above) are downstream of the DINT layer, and must be independently
* masked/enabled. The core Zephyr intc_dw driver unfortunately
* doesn't understand this kind of MP implementation. Note also that
* as instantiated (there are only 28 sources), the high 32 bit
* registers don't exist and aren't named here. Access via e.g.:
*
* ACE_INTC[core_id].inten |= interrupt_bit;
*/
/* Per-core DesignWare-style interrupt controller register bank.
 * Only 28 sources are instantiated, so each 64-bit register pair's
 * high word is an unusedN placeholder rather than a named field.
 * The trailing pad brings the instance to 256 words (1 KiB) so that
 * ACE_INTC[core_id] indexes the next core's bank.
 */
struct ace_intc {
	uint32_t inten;		/* IRQ enable */
	uint32_t unused0;
	uint32_t intmask;	/* IRQ mask */
	uint32_t unused1;
	uint32_t intforce;	/* software-force an interrupt */
	uint32_t unused2;
	uint32_t rawstatus;	/* input state before enable/mask */
	uint32_t unused3;
	uint32_t status;	/* after enable */
	uint32_t unused4;
	uint32_t maskstatus;	/* after enable and mask */
	uint32_t unused5;
	uint32_t finalstatus;	/* what is actually asserted to the core */
	uint32_t unused6;
	uint32_t vector;
	uint32_t unused7[33];
	/* FIQ bank — assumed unused by Zephyr on this SoC; verify */
	uint32_t fiq_inten;
	uint32_t fiq_intmask;
	uint32_t fiq_intforce;
	uint32_t fiq_rawstatus;
	uint32_t fiq_status;
	uint32_t fiq_finalstatus;
	uint32_t plevel;
	uint32_t unused8;
	uint32_t dsp_ictl_version_id;
	uint32_t unused9[199];	/* pad to 1 KiB per core */
};
#define ACE_INTC ((volatile struct ace_intc *)DT_REG_ADDR(DT_NODELABEL(ace_intc)))
#define ACE_INTC_IRQ DT_IRQN(DT_NODELABEL(ace_intc))
/* L2 Local Memory Management */
/* L2 Local Memory Management block. Field meanings below are inferred
 * from the register names (capability, power/attribute and bank-pointer
 * registers for HP/LP SRAM) — confirm against the hardware spec.
 */
struct mtl_l2mm {
	uint32_t l2mcap;	/* capability (same offset aliased by MTL_L2MCAP) */
	uint32_t l2mpat;	/* presumably power attributes — verify */
	uint32_t l2mecap;	/* presumably ECC/extended capability — verify */
	uint32_t l2mecs;	/* presumably ECC control/status — verify */
	uint32_t l2hsbpmptr;	/* HP SRAM bank power-management pointer */
	uint32_t l2usbpmptr;	/* LP (ULP) SRAM bank power-management pointer */
	uint32_t l2usbmrpptr;
	uint32_t l2ucmrpptr;
	uint32_t l2ucmrpdptr;
};
#define MTL_L2MM ((volatile struct mtl_l2mm *)0x71d00)
/* DfL2MCAP */
/* Bitfield view of the DfL2MCAP capability register (fields total 32
 * bits). l2hss/l2uss carry the HP and LP SRAM bank counts, as read by
 * the accessors below; remaining fields are inferred from their names —
 * verify against the hardware spec before use.
 */
struct mtl_l2mcap {
	uint32_t l2hss : 8;	/* HP SRAM bank (EBB) count */
	uint32_t l2uss : 4;	/* LP/ULP SRAM bank count */
	uint32_t l2hsbs : 4;
	uint32_t l2hs2s : 8;
	uint32_t l2usbs : 5;
	uint32_t l2se : 1;
	uint32_t el2se : 1;
	uint32_t rsvd32 : 1;	/* reserved */
};
#define MTL_L2MCAP ((volatile struct mtl_l2mcap *)0x71d00)
/* Number of HP SRAM memory banks (EBBs) as reported by hardware */
static inline uint32_t mtl_hpsram_get_bank_count(void)
{
	return MTL_L2MCAP->l2hss;
}

/* Number of LP SRAM memory banks (EBBs) as reported by hardware */
static inline uint32_t mtl_lpsram_get_bank_count(void)
{
	return MTL_L2MCAP->l2uss;
}
/* Per-bank HP SRAM power control/status registers (8-byte stride) */
struct mtl_hpsram_regs {
	/** @brief power gating control */
	uint8_t HSxPGCTL;
	/** @brief retention mode control */
	uint8_t HSxRMCTL;
	uint8_t reserved[2];
	/** @brief power gating status */
	uint8_t HSxPGISTS;
	uint8_t reserved1[3];
};

/* Register block of HP SRAM bank x */
#define HPSRAM_REGS(x) ((volatile struct mtl_hpsram_regs* const)(0x17A800 + 0x0008 * (x)))
#endif /* _ZEPHYR_SOC_INTEL_ADSP_ACE_V1X_REGS_H_ */

View file

@ -0,0 +1,76 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief Macros for power gating memory banks specific for ACE 1.0
*/
#ifndef __Z_ACE_LIB_ASM_MEMORY_MANAGEMENT_H__
#define __Z_ACE_LIB_ASM_MEMORY_MANAGEMENT_H__
#ifdef _ASMLANGUAGE
/* These definitions should be placed elsewhere, but I can't find a good place for them. */
#define LSPGCTL 0x71D80			/* LP SRAM power gating control base */
#define MAX_MEMORY_SEGMENTS 1		/* number of 32-bank HP SRAM segments */
#define EBB_SEGMENT_SIZE 32		/* banks (EBBs) per segment */
#define PLATFORM_HPSRAM_EBB_COUNT 22	/* total HP SRAM banks on this platform */
/* Applies the per-bank power state in \mask to one 32-bank segment of
 * HP SRAM: for each bank, writes the requested bit to HSxPGCTL and
 * polls HSxPGISTS until status matches the request. \ax..\aw are
 * scratch registers; SAR is saved in \aw and restored at the end.
 */
.macro m_ace_hpsram_power_change segment_index, mask, ax, ay, az, au, aw
	/* i_end = number of banks covered by this segment */
	.if \segment_index == 0
	.if EBB_SEGMENT_SIZE > PLATFORM_HPSRAM_EBB_COUNT
	.set i_end, PLATFORM_HPSRAM_EBB_COUNT
	.else
	.set i_end, EBB_SEGMENT_SIZE
	.endif
	.elseif PLATFORM_HPSRAM_EBB_COUNT >= EBB_SEGMENT_SIZE
	.set i_end, PLATFORM_HPSRAM_EBB_COUNT - EBB_SEGMENT_SIZE
	.else
	.err	/* segment_index out of range for this platform */
	.endif
	.set ebb_index, \segment_index << 5
	.set i, 0 /* i = bank bit in segment */
	rsr.sar \aw /* store old sar value */
	movi \az, (0x17A800 + 0x0008 * ebb_index)/* SHIM_HSPGCTL(ebb_index) */
	/* NOTE(review): counter starts at i_end - 1 and the loop exits when
	 * it reaches 0, so only i_end - 1 banks are visited and bank 0 of
	 * the segment is never touched — confirm this is intentional.
	 */
	movi \au, i_end - 1 /* au = banks count in segment */
2 :
	/* au = current bank in segment */
	mov \ax, \mask /* ax = mask */
	ssr \au
	srl \ax, \ax /* ax >>= current bank */
	extui \ax, \ax, 0, 1 /* ax &= BIT(0) */
	s8i \ax, \az, 0 /* HSxPGCTL.l2lmpge = ax */
	memw
1 :
	l8ui \ay, \az, 4 /* ay = HSxPGISTS.l2lmpgis */
	bne \ax, \ay, 1b /* wait till status==request */
	addi \az, \az, 8 /* next bank's register block */
	addi \au, \au, -1
	bnez \au, 2b
	wsr.sar \aw /* restore saved SAR */
.endm
/* Powers down all 8 LP SRAM banks: writes 1 (gate request) to each
 * bank's control byte and polls the status byte until it matches.
 * \ax..\au are scratch registers.
 */
.macro m_ace_lpsram_power_down_entire ax, ay, az, au
	movi \au, 8 /* LPSRAM_EBB_QUANTITY */
	movi \az, LSPGCTL
	movi \ay, 1 /* power-gate request value */
2 :
	s8i \ay, \az, 0 /* request power-down of this bank */
	memw
1 :
	l8ui \ax, \az, 4 /* poll bank status */
	bne \ax, \ay, 1b
	addi \az, \az, 8 /* next bank (8-byte stride) */
	addi \au, \au, -1
	bnez \au, 2b
.endm
#endif /* _ASMLANGUAGE */
#endif /* __Z_ACE_LIB_ASM_MEMORY_MANAGEMENT_H__ */

View file

@ -0,0 +1,208 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/devicetree.h>
#include <zephyr/arch/xtensa/cache.h>
#include <stddef.h>
#include <stdint.h>
#include <cavs-shim.h>
#include <soc.h>
#include <cavs-mem.h>
#include <cpu_init.h>
#include "manifest.h"
#include <ace_v1x-regs.h>
/* Important note about linkage:
*
* The C code here, starting from boot_core0(), is running entirely in
* IMR memory. The sram banks are not initialized yet and the Zephyr
* code is not yet copied there. No use of this memory is legal until
* after parse_manifest() returns. This means that all symbols in
* this file must be flagged "__imr" or "__imrdata" (or be guaranteed
* to inline via ALWAYS_INLINE, normal gcc "inline" is only a hint)!
*
* There's a similar note with Xtensa register windows: the Zephyr
* exception handles for window overflow are not present in IMR.
* While on existing systems, we start running with a VECBASE pointing
* to ROM handlers (that seem to work), it seems unsafe to rely on
* that. It's not possible to hit an overflow until at least four
* nested function calls, so this is mostly theoretical. Nonetheless
* care should be taken here to make sure the function tree remains
* shallow until SRAM initialization is finished.
*/
/* Various cAVS platform dependencies needed by the bootloader code.
 * These probably want to migrate to devicetree.
 */
#define LPSRAM_MASK(x) 0x00000003	/* x is ignored; the mask is fixed */
#define SRAM_BANK_SIZE (64 * 1024)
#define HOST_PAGE_SIZE 4096
#define MANIFEST_SEGMENT_COUNT 3	/* text, data, bss per module */

extern void soc_trace_init(void);
/* Initial/true entry point. Does nothing but jump to
 * z_boot_asm_entry (which cannot be here, because it needs to be able
 * to reference immediates which must link before it)
 */
__asm__(".pushsection .boot_entry.text, \"ax\" \n\t"
	".global rom_entry \n\t"
	"rom_entry: \n\t"
	" j z_boot_asm_entry \n\t"
	".popsection \n\t");
/* Entry stub. Sets up register windows and stack such that we can
 * enter C code successfully, and calls boot_core0()
 */
#define STRINGIFY_MACRO(x) Z_STRINGIFY(x)
#define IMRSTACK STRINGIFY_MACRO(CONFIG_IMR_MANIFEST_ADDR)

/* PS value 0x4002f sets WOE and UM with interrupts masked, and
 * WINDOWBASE/WINDOWSTART are reset before the first windowed call.
 * NOTE(review): the initial stack pointer is the IMR manifest address
 * (stack grows down from the manifest base) — confirm no overlap with
 * manifest contents still needed at this point.
 */
__asm__(".section .imr.z_boot_asm_entry, \"x\" \n\t"
	".align 4 \n\t"
	"z_boot_asm_entry: \n\t"
	" movi a0, 0x4002f \n\t"
	" wsr a0, PS \n\t"
	" movi a0, 0 \n\t"
	" wsr a0, WINDOWBASE \n\t"
	" movi a0, 1 \n\t"
	" wsr a0, WINDOWSTART \n\t"
	" rsync \n\t"
	" movi a1, " IMRSTACK "\n\t"
	" call4 boot_core0 \n\t");
/* Busy-wait for roughly @n cycles by issuing nops. */
static ALWAYS_INLINE void idelay(int n)
{
	while (n-- != 0) {
		__asm__ volatile("nop");
	}
}
/* Word-wise memcpy used by the boot loader: invalidates the source
 * range from cache, copies whole 32-bit words (any trailing 1-3 bytes
 * are dropped), then flushes the destination range.
 */
static ALWAYS_INLINE void bmemcpy(void *dest, void *src, size_t bytes)
{
	uint32_t *dst32 = dest;
	uint32_t *src32 = src;
	size_t words = bytes >> 2;

	z_xtensa_cache_inv(src, bytes);
	for (size_t w = 0; w < words; w++) {
		dst32[w] = src32[w];
	}

	z_xtensa_cache_flush(dest, bytes);
}
/* Word-wise bzero used by the boot loader: zeroes whole 32-bit words
 * (any trailing 1-3 bytes are left untouched) and flushes the range.
 */
static ALWAYS_INLINE void bbzero(void *dest, size_t bytes)
{
	uint32_t *dst32 = dest;
	size_t words = bytes >> 2;

	for (size_t w = 0; w < words; w++) {
		dst32[w] = 0;
	}

	z_xtensa_cache_flush(dest, bytes);
}
/* Copies one module's loadable segments out of the IMR-resident image
 * into their linked SRAM addresses. BSS segments are skipped (SRAM
 * init already zeroed them); unknown segment types are ignored.
 */
static __imr void parse_module(struct sof_man_fw_header *hdr, struct sof_man_module *mod)
{
	/* each module has 3 segments */
	for (int seg = 0; seg < MANIFEST_SEGMENT_COUNT; seg++) {
		uint32_t type = mod->segment[seg].flags.r.type;

		if (type == SOF_MAN_SEGMENT_TEXT || type == SOF_MAN_SEGMENT_DATA) {
			/* offset of this segment's payload within the image */
			uint32_t bias = mod->segment[seg].file_offset -
				SOF_MAN_ELF_TEXT_OFFSET;

			/* copy from IMR to SRAM */
			bmemcpy((void *)mod->segment[seg].v_base_addr,
				(uint8_t *)hdr + bias,
				mod->segment[seg].flags.r.length * HOST_PAGE_SIZE);
		}
		/* SOF_MAN_SEGMENT_BSS already bbzero'd by sram init;
		 * anything else is ignored.
		 */
	}
}
#define MAN_SKIP_ENTRIES 1

/* Walks the firmware manifest in IMR and copies every module except
 * the bootloader module (entry 0) into SRAM via parse_module().
 */
__imr void parse_manifest(void)
{
	struct sof_man_fw_desc *desc = (struct sof_man_fw_desc *)CONFIG_IMR_MANIFEST_ADDR;
	struct sof_man_fw_header *hdr = &desc->header;

	z_xtensa_cache_inv(hdr, sizeof(*hdr));

	/* copy module to SRAM - skip bootloader module */
	for (int idx = MAN_SKIP_ENTRIES; idx < hdr->num_module_entries; idx++) {
		struct sof_man_module *mod = desc->man_module + idx;

		z_xtensa_cache_inv(mod, sizeof(*mod));
		parse_module(hdr, mod);
	}
}
/**
 * @brief Powers up all HP SRAM memory banks.
 *
 * Note: despite the original description ("powers up a number of banks
 * provided as an argument and gates the rest"), this implementation
 * takes no argument — it writes 0 (power on) to every bank's control
 * register and then polls each bank's status byte until it reads 0.
 */
static __imr void hp_sram_pm_banks(void)
{
	uint32_t hpsram_ebb_quantity = mtl_hpsram_get_bank_count();
	/* l2hsbpmptr holds the address of the per-bank power-management
	 * register array: one 8-byte record per bank, control word at +0
	 * and status byte at +4.
	 */
	volatile uint32_t *l2hsbpmptr = (volatile uint32_t *)MTL_L2MM->l2hsbpmptr;
	volatile uint8_t *status = (volatile uint8_t *)l2hsbpmptr + 4;
	int inx, delay_count = 256;

	/* Request power-up for every bank first... */
	for (inx = 0; inx < hpsram_ebb_quantity; ++inx) {
		*(l2hsbpmptr + inx * 2) = 0;
	}

	/* ...then wait for each bank to report powered (status == 0) */
	for (inx = 0; inx < hpsram_ebb_quantity; ++inx) {
		while (*(status + inx * 8) != 0) {
			idelay(delay_count);
		}
	}
}
/* Initializes HP SRAM before first use.
 *
 * @param memory_size Currently unused: all banks reported by hardware
 *                    are powered up regardless of the requested size.
 */
__imr void hp_sram_init(uint32_t memory_size)
{
	hp_sram_pm_banks();
}
/* Powers up every LP SRAM bank by clearing its power-management
 * control word (one 8-byte record per bank, pointed to by l2usbpmptr).
 */
__imr void lp_sram_init(void)
{
	uint32_t banks = mtl_lpsram_get_bank_count();
	volatile uint32_t *pgctl = (volatile uint32_t *)MTL_L2MM->l2usbpmptr;

	for (uint32_t bank = 0; bank < banks; bank++) {
		pgctl[bank * 2] = 0;
	}
}
/* Configures host-visible memory windows 0 and 3: read-only to the
 * host and enabled, with the window length programmed into dmwlo.
 */
__imr void win_setup(void)
{
	uint32_t *win0 = z_soc_uncached_ptr((void *)HP_SRAM_WIN0_BASE);

	/* Software protocol: "firmware entered" has the value 5 */
	win0[0] = 5;

	CAVS_WIN[0].dmwlo = HP_SRAM_WIN0_SIZE | 0x7;
	CAVS_WIN[0].dmwba = (HP_SRAM_WIN0_BASE | CAVS_DMWBA_READONLY | CAVS_DMWBA_ENABLE);

	CAVS_WIN[3].dmwlo = HP_SRAM_WIN3_SIZE | 0x7;
	CAVS_WIN[3].dmwba = (HP_SRAM_WIN3_BASE | CAVS_DMWBA_READONLY | CAVS_DMWBA_ENABLE);
}

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/drivers/interrupt_controller/dw_ace_v1x.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ace_v1x_soc, CONFIG_SOC_LOG_LEVEL);
/* Looks up the ACE interrupt controller device, logging on failure.
 * Factored out of the three z_soc_irq_* hooks below, which previously
 * repeated this lookup-and-log sequence verbatim.
 *
 * @return the controller device, or NULL if the binding failed.
 */
static const struct device *ace_intc_device(void)
{
	const struct device *dev = device_get_binding(DT_LABEL(DT_NODELABEL(ace_intc)));

	if (!dev) {
		LOG_DBG("board: ACE V1X device binding failed");
	}
	return dev;
}

/* SoC hook: enable interrupt @irq via the ACE interrupt controller.
 * Silently (debug-logged) does nothing when the controller device is
 * unavailable.
 */
void z_soc_irq_enable(uint32_t irq)
{
	const struct device *dev = ace_intc_device();

	if (dev) {
		const struct dw_ace_v1_ictl_driver_api *api =
			(const struct dw_ace_v1_ictl_driver_api *)dev->api;

		api->intr_enable(dev, irq);
	}
}

/* SoC hook: disable interrupt @irq via the ACE interrupt controller. */
void z_soc_irq_disable(uint32_t irq)
{
	const struct device *dev = ace_intc_device();

	if (dev) {
		const struct dw_ace_v1_ictl_driver_api *api =
			(const struct dw_ace_v1_ictl_driver_api *)dev->api;

		api->intr_disable(dev, irq);
	}
}

/* SoC hook: query whether interrupt @irq is enabled.
 *
 * @return nonzero if enabled, 0 if disabled, -ENODEV if the controller
 *         device is unavailable.
 */
int z_soc_irq_is_enabled(unsigned int irq)
{
	const struct device *dev = ace_intc_device();
	const struct dw_ace_v1_ictl_driver_api *api;

	if (!dev) {
		return -ENODEV;
	}
	api = (const struct dw_ace_v1_ictl_driver_api *)dev->api;
	return api->intr_is_enabled(dev, irq);
}

View file

@ -0,0 +1,16 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/linker/devicetree_regions.h>
#include <ace-link.ld>
/* Memory regions and output sections are generated entirely from
 * devicetree; the platform-common layout comes from ace-link.ld.
 */
MEMORY
{
	LINKER_DT_REGIONS()
}

SECTIONS
{
	LINKER_DT_SECTIONS()
}

View file

@ -0,0 +1,64 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <soc.h>
#include <ace_v1x-regs.h>
#include <ace-ipc-regs.h>
/* ISR for the inter-core IPC interrupt: acknowledges the message on
 * this core's agent A and, on SMP builds, runs the scheduler IPI
 * handler.
 *
 * @param arg Unused.
 */
static void ipc_isr(void *arg)
{
	MTL_P2P_IPC[arch_proc_id()].agents[0].ipc.tdr = BIT(31); /* clear BUSY bit */

#ifdef CONFIG_SMP
	void z_sched_ipi(void);
	z_sched_ipi();
#endif
}
/* One-time SMP bring-up: connects and enables the IDC/IPC interrupt,
 * unmasks it per-core at the DINT layer, configures each core's IPC
 * agent A, and marks core 0 as active.
 */
void soc_mp_init(void)
{
	IRQ_CONNECT(MTL_IRQ_TO_ZEPHYR(MTL_INTL_IDCA), 0, ipc_isr, 0, 0);

	irq_enable(MTL_IRQ_TO_ZEPHYR(MTL_INTL_IDCA));

	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
		/* DINT has one bit per IPC, unmask only IPC "Ax" on core "x" */
		MTL_DINT[i].ie[MTL_INTL_IDCA] = BIT(i);

		/* Agent A should signal only BUSY interrupts */
		MTL_P2P_IPC[i].agents[0].ipc.ctl = BIT(0); /* IPCTBIE */
	}

	/* Set the core 0 active */
	soc_cpus_active[0] = true;
}
/* Kicks off boot of secondary core @cpu_num by setting its
 * "set power active" (SPA) bit in the capability control register.
 */
void soc_start_core(int cpu_num)
{
	MTL_PWRBOOT.capctl[cpu_num].ctl |= MTL_PWRBOOT_CTL_SPA;
}
/* Early per-core setup run on a freshly started secondary core @cpu:
 * enables the interrupt controller IRQ and blocks idle power gating.
 */
void soc_mp_startup(uint32_t cpu)
{
	/* Must have this enabled always */
	z_xtensa_irq_enable(ACE_INTC_IRQ);

	/* Prevent idle from powering us off */
	MTL_PWRBOOT.bootctl[cpu].bctl |=
		MTL_PWRBOOT_BCTL_WAITIPCG | MTL_PWRBOOT_BCTL_WAITIPPG;
}
/* Sends a scheduling IPI to every other active core by writing BUSY to
 * its agent B, which raises the agent A interrupt on that core.
 */
void arch_sched_ipi(void)
{
	uint32_t self = arch_proc_id();

	for (int core = 0; core < CONFIG_MP_NUM_CPUS; core++) {
		if (core == self || !soc_cpus_active[core]) {
			continue;
		}
		/* Signal agent B[core] to interrupt via agent A[core] */
		MTL_P2P_IPC[core].agents[1].ipc.idr = CAVS_IPC_BUSY;
	}
}

View file

@ -0,0 +1,159 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include "asm_memory_management.h"
.section .text, "ax"
.align 64

/* Cache-line-aligned literal pool; locked into the dcache by
 * ace_power_down before SRAM is switched off.
 */
power_down_literals:
	.literal_position
ipc_flag:
	.word 0x80000000 // IPC_DIPCTDR_BUSY
sram_dis_loop_cnt:
	.word 4096

.global ace_power_down
.type ace_power_down, @function

/**
 * @brief Perform power down.
 *
 * Depending on arguments, memories are switched off.
 *
 * @param A2 - argument for LPSRAM
 * @param A3 - pointer to array containing power gating mask.
 *	Size of array is determined by MAX_MEMORY_SEGMENTS define.
 * @param A4 - send response to ipc
 */

#define IPC_HOST_BASE 0x00073000

/* Symbolic names for the registers used in ace_power_down */
#define b_enable_lpsram		a2
#define pu32_hpsram_mask	a3
#define b_ipc_response		a4
#define temp_reg0		a6
#define temp_reg1		a7
#define temp_reg2		a8
#define temp_reg3		a9
#define temp_reg4		a10
#define temp_reg5		a11
#define temp_reg6		a12
#define p_ipc_regs		a13
#define u32_ipc_response_mask	a14
#define pfl_reg			a15
ace_power_down:
	entry sp, 32
	/**
	 * Lock this routine, its literals, and the mask array into cache so
	 * they stay accessible after SRAM is gated. Effectively executes:
	 *   xthal_dcache_region_lock(&literals, 128);
	 *   xthal_dcache_region_lock(&powerdown, 256);
	 *   xthal_dcache_region_lock(&pu32_hpsram_mask, 64);
	 */
	movi pfl_reg, power_down_literals
	dpfl pfl_reg, 0
	dpfl pfl_reg, 64

	movi pfl_reg, ace_power_down
	ipfl pfl_reg, 0
	ipfl pfl_reg, 64
	ipfl pfl_reg, 128
	ipfl pfl_reg, 192

	mov pfl_reg, pu32_hpsram_mask
	dpfl pfl_reg, 0

	/* move some values to registries before switching off whole memory */
	/* load address of DIPCTDR register */
	movi p_ipc_regs, IPC_HOST_BASE
	movi u32_ipc_response_mask, 0x20000000

_PD_DISABLE_LPSRAM:
	/**
	 * effectively executes:
	 *   if (b_enable_lpsram) {
	 *       ace_lpsram_power_down_entire();
	 *   }
	 */
	beqz b_enable_lpsram, _PD_DISABLE_HPSRAM
	m_ace_lpsram_power_down_entire temp_reg0, temp_reg1, temp_reg2, temp_reg3

_PD_DISABLE_HPSRAM:
	/* if value in memory pointed by pu32_hpsram_mask = 0
	 * (hpsram_pwrgating_mask) - do not disable hpsram.
	 */
	beqz pu32_hpsram_mask, _PD_SEND_IPC

	/**
	 * effectively executes:
	 *   for (size_t seg_index = (MAX_MEMORY_SEGMENTS - 1); seg_index >= 0;
	 *        --seg_index) {
	 *       ace_hpsram_power_change(seg_index, mask[seg_index]);
	 *   }
	 * where mask is given in pu32_hpsram_mask register
	 */
	.set seg_index, MAX_MEMORY_SEGMENTS - 1
	.rept MAX_MEMORY_SEGMENTS
	l32i temp_reg0, pu32_hpsram_mask, 4 * seg_index
	m_ace_hpsram_power_change\
		/*segment_index=*/ seg_index,\
		/*mask=*/ temp_reg0,\
		temp_reg1,\
		temp_reg2,\
		temp_reg3,\
		temp_reg4,\
		temp_reg5
	.set seg_index, seg_index - 1
	.endr

_PD_SEND_IPC:
	/**
	 * Send IPC to host informing of PD completion - Clear BUSY
	 * bit by writing IPC_DIPCTDR_BUSY to IPC_DIPCTDR
	 * and writing IPC_DIPCTDA_DONE to IPC_DIPCTDA
	 */
	/**
	 * effectively executes:
	 *   if (b_ipc_response)
	 *   {
	 *       temp_reg2 = *p_ipc_regs;
	 *       *(p_ipc_regs) = 0x80000000;
	 *       *(p_ipc_regs + 1) = 0x40000000;
	 *       temp_reg1 = temp_reg2 | u32_ipc_response_mask;
	 *       *(p_ipc_regs + 4) = temp_reg1;
	 *   }
	 */
	beqz b_ipc_response, _PD_SLEEP
	movi temp_reg0, 1
	slli temp_reg1, temp_reg0, 31		/* temp_reg1 = BIT(31) (BUSY) */
	l32i temp_reg2, p_ipc_regs, 0		/* save message received from host */
	/* clear busy bit by storing whole message in ADDRESS(HfIPCx) */
	s32i temp_reg1, p_ipc_regs, 0
	/* store msg received from host in DIPCTDA register */
	/* to enlighten done bit and trigger interrupt on host side */
	/* ace busy is cleared by writing 0 */
	movi temp_reg1, 0
	s32i temp_reg1, p_ipc_regs, 0x4
	/* Copy to a13 with IPC_RESPONSE_MASK set which is in a14 */
	or temp_reg1, temp_reg2, u32_ipc_response_mask
	/* Send reply IPC writing to DIPCIDR register */
	movi temp_reg0, 0
	s32i temp_reg0, p_ipc_regs, 0x18
	s32i temp_reg1, p_ipc_regs, 0x10
	l8ui_placeholder_removed:
	l32i temp_reg1, p_ipc_regs, 0x10	/* read back; presumably posts the write - NOTE(review): confirm */

_PD_SLEEP:
	/* effectively executes:
	 *   xmp_spin()
	 *   waiti 5
	 */
	movi temp_reg0, 128
loop:
	addi temp_reg0, temp_reg0, -1
	bnez temp_reg0, loop

	extw
	extw
	waiti 5
1:
	j 1b

.size ace_power_down , . - ace_power_down

View file

@ -0,0 +1,41 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/toolchain.h>
#include <manifest.h>
#include <cavs-mem.h>
/* These data structures define "module manifest" headers. They
* aren't runtime data used by Zephyr, but instead act as input
* parameters to rimage and later to the ROM loader on the DSP. As it
* happens most of the data here is ignored by both layers, but it's
* left unchanged for historical purposes.
*/
/* Manifest entry for the bootloader ("BRNGUP") module; its entry point
 * is the IMR boot loader text base. Placed in a dedicated section so
 * rimage can locate it.
 */
__attribute__((section(".module.boot")))
const struct sof_man_module_manifest boot_manifest = {
	.module = {
		.name = "BRNGUP",
		.uuid = { 0xf3, 0xe4, 0x79, 0x2b, 0x75, 0x46, 0x49, 0xf6,
			  0x89, 0xdf, 0x3b, 0xc1, 0x94, 0xa9, 0x1a, 0xeb },
		.entry_point = IMR_BOOT_LDR_TEXT_ENTRY_BASE,
		.type = { .load_type = SOF_MAN_MOD_TYPE_MODULE,
			  .domain_ll = 1, },
		.affinity_mask = 3,
	}
};
/* Manifest entry for the main firmware ("BASEFW") module; its entry
 * point is the start of RAM. Placed in a dedicated section so rimage
 * can locate it.
 */
__attribute__((section(".module.main")))
const struct sof_man_module_manifest main_manifest = {
	.module = {
		.name = "BASEFW",
		.uuid = { 0x32, 0x8c, 0x39, 0x0e, 0xde, 0x5a, 0x4b, 0xba,
			  0x93, 0xb1, 0xc5, 0x04, 0x32, 0x28, 0x0e, 0xe4 },
		.entry_point = RAM_BASE,
		.type = { .load_type = SOF_MAN_MOD_TYPE_MODULE,
			  .domain_ll = 1 },
		.affinity_mask = 3,
	}
};

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <device.h>
#include <xtensa/xtruntime.h>
#include <irq_nextlevel.h>
#include <xtensa/hal.h>
#include <init.h>
#include <arch/xtensa/cache.h>
#include <cavs-shim.h>
#include <cavs-mem.h>
#include <cpu_init.h>
#include "manifest.h"
#include <ace_v1x-regs.h>
#include "soc.h"
extern void soc_mp_init(void);
extern void win_setup(void);
extern void lp_sram_init(void);
extern void hp_sram_init(uint32_t memory_size);
extern void parse_manifest(void);
/* LP GP-DMA init: per-instance register address plus controller/channel
 * ownership flags written at boot (see power_init_mtl()).
 * NOTE(review): bit semantics inherited from SOF platform code — confirm
 * against hardware documentation.
 */
#define DSP_INIT_LPGPDMA(x) (0x71A60 + (2*x))
#define LPGPDMA_CTLOSEL_FLAG BIT(15)
#define LPGPDMA_CHOSEL_FLAG 0xFF
/* First C code run out of IMR after the assembly entry stub. Performs
 * early CPU init, powers and initializes SRAM, copies the firmware
 * image out of the manifest, and hands off to the Zephyr kernel.
 * Ordering is critical: no SRAM use is legal before parse_manifest()
 * returns (see note at the top of this file).
 */
__imr void boot_core0(void)
{
	/* This is a workaround for simulator, which
	 * doesn't support direct control over the secondary core boot
	 * vectors. It always boots into the ROM, and ends up here.
	 * This emulates the hardware design and jumps to the pointer
	 * found in the BADDR register (which is wired to the Xtensa
	 * LX core's alternate boot vector input). Should be removed
	 * at some point, but it's tiny and harmless otherwise.
	 */
	int prid;

	prid = arch_proc_id();
	if (prid != 0) {
		((void(*)(void))MTL_PWRBOOT.bootctl[prid].baddr)();
	}

	cpu_early_init();

#ifdef PLATFORM_DISABLE_L2CACHE_AT_BOOT
	/* FIXME: L2 cache control PCFG register */
	*(uint32_t *)0x1508 = 0;
#endif

	hp_sram_init(L2_SRAM_SIZE);
	win_setup();
	lp_sram_init();
	parse_manifest();
	soc_trace_init();

	/* Flush so the SRAM image is coherent before the kernel starts */
	z_xtensa_cache_flush_all();

	/* Zephyr! */
	extern FUNC_NORETURN void z_cstart(void);
	z_cstart();
}
/* One-time boot power configuration for core 0: keeps idle power
 * gating disabled and, when the ACE GPDMA driver is enabled, writes
 * the LPGPDMA ownership flags for both controller instances.
 */
static __imr void power_init_mtl(void)
{
	/* Disable idle power gating */
	MTL_PWRBOOT.bootctl[0].bctl |= MTL_PWRBOOT_BCTL_WAITIPCG | MTL_PWRBOOT_BCTL_WAITIPPG;

#if CONFIG_DMA_ACE_GPDMA
	sys_write32(LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG, DSP_INIT_LPGPDMA(0));
	sys_write32(LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG, DSP_INIT_LPGPDMA(1));
#endif
}
/* SoC init hook (PRE_KERNEL_1): runs boot power setup and, on SMP
 * builds, the multiprocessing bring-up.
 *
 * @param dev Unused (required by the SYS_INIT signature).
 * @return 0 always.
 */
static __imr int soc_init(const struct device *dev)
{
	power_init_mtl();

#if CONFIG_MP_NUM_CPUS > 1
	soc_mp_init();
#endif

	return 0;
}

SYS_INIT(soc_init, PRE_KERNEL_1, 99);