soc: intel_adsp: cavs: start using zephyr power management

Start using zephyr power management in cavs platform in a similar way
that is already done in ace. This commit only addresses the power off/on
sequence. Runtime power management is not implemented.

Signed-off-by: Jaska Uimonen <jaska.uimonen@linux.intel.com>
This commit is contained in:
Jaska Uimonen 2023-03-01 08:13:48 +02:00 committed by Anas Nashif
parent 59fe77a6b4
commit 95168e6776
7 changed files with 453 additions and 0 deletions

View file

@ -16,6 +16,7 @@
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx6";
reg = <0>;
cpu-power-states = <&d3>;
i-cache-line-size = <64>;
d-cache-line-size = <64>;
};
@ -24,18 +25,33 @@
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx6";
reg = <1>;
cpu-power-states = <&d3>;
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx6";
reg = <2>;
cpu-power-states = <&d3>;
};
cpu3: cpu@3 {
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx6";
reg = <3>;
cpu-power-states = <&d3>;
};
};
power-states {
/* PM_STATE_SOFT_OFF can be entered only by calling pm_state_force.
* The procedure is triggered by IPC from the HOST (SET_DX).
*/
d3: off {
compatible = "zephyr,power-state";
power-state-name = "soft-off";
/* INT32_MAX residency: the idle governor can never satisfy it,
* so this state is only reachable via pm_state_force (see above).
*/
min-residency-us = <2147483647>;
exit-latency-us = <0>;
};
};

View file

@ -16,6 +16,7 @@
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx6";
reg = <0>;
cpu-power-states = <&d3>;
i-cache-line-size = <64>;
d-cache-line-size = <64>;
};
@ -24,6 +25,19 @@
device_type = "cpu";
compatible = "cdns,tensilica-xtensa-lx6";
reg = <1>;
cpu-power-states = <&d3>;
};
};
power-states {
/* PM_STATE_SOFT_OFF can be entered only by calling pm_state_force.
* The procedure is triggered by IPC from the HOST (SET_DX).
*/
d3: off {
compatible = "zephyr,power-state";
power-state-name = "soft-off";
/* INT32_MAX residency: the idle governor can never satisfy it,
* so this state is only reachable via pm_state_force (see above).
*/
min-residency-us = <2147483647>;
exit-latency-us = <0>;
};
};

View file

@ -9,6 +9,7 @@ zephyr_library_include_directories(${ZEPHYR_BASE}/drivers)
zephyr_library_sources(
sram.c
power.c
power_down_cavs.S
)
if(CONFIG_SMP OR CONFIG_MP_MAX_NUM_CPUS GREATER 1)

View file

@ -0,0 +1,108 @@
/* Copyright (c) 2023 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef __ZEPHYR_CAVS_LIB_ASM_LDO_MANAGEMENT_H__
#define __ZEPHYR_CAVS_LIB_ASM_LDO_MANAGEMENT_H__
#ifdef _ASMLANGUAGE
/* SHIM register block base and the LDO control register offset within it. */
#define SHIM_BASE 0x00071F00
#define SHIM_LDOCTL 0xA4
/* Two-bit LDO control fields in SHIM_LDOCTL: HPSRAM uses bits [1:0] and
 * [17:16]; LPSRAM uses bits [3:2]. ON = both field bits set, OFF = both
 * clear, BYPASS = only the low bit of each field set.
 */
#define SHIM_LDOCTL_HPSRAM_MASK (3 << 0 | 3 << 16)
#define SHIM_LDOCTL_LPSRAM_MASK (3 << 2)
#define SHIM_LDOCTL_HPSRAM_LDO_ON (3 << 0 | 3 << 16)
#define SHIM_LDOCTL_LPSRAM_LDO_ON (3 << 2)
#define SHIM_LDOCTL_HPSRAM_LDO_OFF (0 << 0)
#define SHIM_LDOCTL_LPSRAM_LDO_OFF (0 << 2)
#define SHIM_LDOCTL_HPSRAM_LDO_BYPASS (BIT(0) | BIT(16))
#define SHIM_LDOCTL_LPSRAM_LDO_BYPASS BIT(2)
/* Write \state to SHIM_LDOCTL, issue a memory barrier, then burn ~128
 * iterations to let the LDO settle.
 * Clobbers: \ax.
 */
.macro m_cavs_set_ldo_state state, ax
movi \ax, (SHIM_BASE + SHIM_LDOCTL)
s32i \state, \ax, 0
memw
/* wait loop > 300ns (min 100ns required) */
movi \ax, 128
1 :
addi \ax, \ax, -1
nop
bnez \ax, 1b
.endm
/* Read-modify-write SHIM_LDOCTL so that only the HPSRAM LDO bits take the
 * value in \state; all other bits are preserved.
 * Clobbers: \state, \ax, \ay.
 */
.macro m_cavs_set_hpldo_state state, ax, ay
movi \ax, (SHIM_BASE + SHIM_LDOCTL)
l32i \ay, \ax, 0
movi \ax, ~(SHIM_LDOCTL_HPSRAM_MASK)
and \ay, \ax, \ay
or \state, \ay, \state
m_cavs_set_ldo_state \state, \ax
.endm
/* Read-modify-write SHIM_LDOCTL so that only the LPSRAM LDO bits take the
 * value in \state; all other bits are preserved.
 * Clobbers: \state, \ax, \ay.
 */
.macro m_cavs_set_lpldo_state state, ax, ay
movi \ax, (SHIM_BASE + SHIM_LDOCTL)
l32i \ay, \ax, 0
/* LP SRAM mask */
movi \ax, ~(SHIM_LDOCTL_LPSRAM_MASK)
and \ay, \ax, \ay
or \state, \ay, \state
m_cavs_set_ldo_state \state, \ax
.endm
/* Switch both HPSRAM and LPSRAM LDOs ON (other SHIM_LDOCTL bits preserved),
 * including the post-write settle delay from m_cavs_set_ldo_state.
 * Clobbers: \ax, \ay, \az.
 */
.macro m_cavs_set_ldo_on_state ax, ay, az
movi \ay, (SHIM_BASE + SHIM_LDOCTL)
l32i \az, \ay, 0
movi \ax, ~(SHIM_LDOCTL_HPSRAM_MASK | SHIM_LDOCTL_LPSRAM_MASK)
and \az, \ax, \az
movi \ax, (SHIM_LDOCTL_HPSRAM_LDO_ON | SHIM_LDOCTL_LPSRAM_LDO_ON)
or \ax, \az, \ax
m_cavs_set_ldo_state \ax, \ay
.endm
/* Switch both HPSRAM and LPSRAM LDOs OFF. Note the delay comes BEFORE the
 * write here (unlike the ON path), and the register is written directly
 * with a read-back instead of going through m_cavs_set_ldo_state.
 * Clobbers: \ax, \ay, \az.
 */
.macro m_cavs_set_ldo_off_state ax, ay, az
/* wait loop > 300ns (min 100ns required) */
movi \ax, 128
1 :
addi \ax, \ax, -1
nop
bnez \ax, 1b
movi \ay, (SHIM_BASE + SHIM_LDOCTL)
l32i \az, \ay, 0
movi \ax, ~(SHIM_LDOCTL_HPSRAM_MASK | SHIM_LDOCTL_LPSRAM_MASK)
and \az, \az, \ax
movi \ax, (SHIM_LDOCTL_HPSRAM_LDO_OFF | SHIM_LDOCTL_LPSRAM_LDO_OFF)
or \ax, \ax, \az
s32i \ax, \ay, 0
l32i \ax, \ay, 0
.endm
/* Put both HPSRAM and LPSRAM LDOs into BYPASS; same delay-then-write shape
 * as m_cavs_set_ldo_off_state.
 * Clobbers: \ax, \ay, \az.
 */
.macro m_cavs_set_ldo_bypass_state ax, ay, az
/* wait loop > 300ns (min 100ns required) */
movi \ax, 128
1 :
addi \ax, \ax, -1
nop
bnez \ax, 1b
movi \ay, (SHIM_BASE + SHIM_LDOCTL)
l32i \az, \ay, 0
movi \ax, ~(SHIM_LDOCTL_HPSRAM_MASK | SHIM_LDOCTL_LPSRAM_MASK)
and \az, \az, \ax
movi \ax, (SHIM_LDOCTL_HPSRAM_LDO_BYPASS | SHIM_LDOCTL_LPSRAM_LDO_BYPASS)
or \ax, \ax, \az
s32i \ax, \ay, 0
l32i \ax, \ay, 0
.endm
#endif /* _ASMLANGUAGE */
#endif /* __ZEPHYR_CAVS_LIB_ASM_LDO_MANAGEMENT_H__ */

View file

@ -0,0 +1,76 @@
/* Copyright (c) 2023 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef __ZEPHYR_CAVS_LIB_ASM_MEMORY_MANAGEMENT_H__
#define __ZEPHYR_CAVS_LIB_ASM_MEMORY_MANAGEMENT_H__
#ifdef _ASMLANGUAGE
/* HPSRAM segment #0 power-gating control / retention / status registers;
 * further segments are 0x10 bytes apart (see SHIM_HSPGCTL below).
 */
#define HSPGCTL0 0x71D10
#define HSRMCTL0 0x71D14
#define HSPGISTS0 0x71D18
/* LPSRAM power-gating control / retention / status registers. */
#define LSPGCTL 0x71D50
#define LSRMCTL 0x71D54
#define LSPGISTS 0x71D58
#define SHIM_HSPGCTL(x) (HSPGCTL0 + 0x10 * (x))
#define SHIM_HSPGISTS(x) (HSPGISTS0 + 0x10 * (x))
#define LPSRAM_MASK 0x1
/**
 * Macro powers down entire HPSRAM. On entry literals and code for section from
 * where this code is executed need to be placed in memory which is not
 * HPSRAM (in case when this code is located in HPSRAM, lock memory in L1$ or
 * L1 SRAM)
 *
 * Clobbers: \ax, \ay, \az.
 * NOTE(review): this macro stores the mask through \ax (SHIM_HSPGISTS) and
 * polls \az (SHIM_HSPGCTL), which is the opposite of
 * m_cavs_hpsram_power_change below — confirm against the hardware register
 * spec which of the two orderings is intended.
 */
.macro m_cavs_hpsram_power_down_entire ax, ay, az
/* SEGMENT #0 */
movi \az, SHIM_HSPGCTL(0)
movi \ax, SHIM_HSPGISTS(0)
movi \ay, 0x1FFFFFFF /* HPSRAM_MASK(0) */
s32i \ay, \ax, 0
memw
1 :
l32i \ax, \az, 0
bne \ax, \ay, 1b
/* SEGMENT #1 */
movi \az, SHIM_HSPGCTL(1)
movi \ax, SHIM_HSPGISTS(1)
movi \ay, 0x0FFFFFFF /* HPSRAM_MASK(1) */
s32i \ay, \ax, 0
memw
1 :
l32i \ax, \az, 0
bne \ax, \ay, 1b
.endm
/* Write \mask to the power-gating control register of HPSRAM segment
 * \segment_index, then spin until the status register reports the same
 * value. \segment_index must be an assembly-time constant.
 * Clobbers: \ax, \ay (\az is accepted but not used here).
 */
.macro m_cavs_hpsram_power_change segment_index, mask, ax, ay, az
movi \ax, SHIM_HSPGCTL(\segment_index)
movi \ay, SHIM_HSPGISTS(\segment_index)
s32i \mask, \ax, 0
memw
/* assumed that HDA shared dma buffer will be in LPSRAM */
1 :
l32i \ax, \ay, 0
bne \ax, \mask, 1b
.endm
/* Gate off the whole LPSRAM by writing LPSRAM_MASK to LSPGCTL, then burn a
 * fixed delay loaded from \loop_cnt_addr instead of polling LSPGISTS
 * (\az is loaded with the status address but never read).
 * Clobbers: \ax, \ay, \az.
 */
.macro m_cavs_lpsram_power_down_entire ax, ay, az, loop_cnt_addr
movi \az, LSPGISTS
movi \ax, LSPGCTL
movi \ay, LPSRAM_MASK
s32i \ay, \ax, 0
memw
/* assumed that HDA shared dma buffer will be in LPSRAM */
movi \ax, \loop_cnt_addr
l32i \ax, \ax, 0
1 :
addi \ax, \ax, -1
bnez \ax, 1b
.endm
#endif /* _ASMLANGUAGE */
#endif /* __ZEPHYR_CAVS_LIB_ASM_MEMORY_MANAGEMENT_H__ */

View file

@ -10,6 +10,9 @@
#include <xtensa/hal.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/device.h>
#include <cpu_init.h>
#include <adsp_shim.h>
#include <adsp_clk.h>
@ -32,6 +35,84 @@ LOG_MODULE_REGISTER(soc);
#endif
#ifdef CONFIG_PM
/* SRAM alias windows: ORing SRAM_ALIAS_OFFSET into an address produces the
 * alternate (cache-routed) alias used by uncache_to_cache() below.
 * NOTE(review): SRAM_ALIAS_BASE/SRAM_ALIAS_MASK are not referenced in this
 * fragment — presumably used elsewhere in the file.
 */
#define SRAM_ALIAS_BASE 0x9E000000
#define SRAM_ALIAS_MASK 0xFF000000
/* Number of EBB banks per HPSRAM segment; pm_state_set builds a full
 * power-gating bitmap from this (one bit per bank).
 */
#define EBB_BANKS_IN_SEGMENT 32
#define SRAM_ALIAS_OFFSET 0x20000000
/* Interrupt-level bit masks.
 * NOTE(review): ALL_USED_INT_LEVELS_MASK is unused in this fragment —
 * presumably consumed by code outside this hunk.
 */
#define L2_INTERRUPT_NUMBER 4
#define L2_INTERRUPT_MASK (1<<L2_INTERRUPT_NUMBER)
#define L3_INTERRUPT_NUMBER 6
#define L3_INTERRUPT_MASK (1<<L3_INTERRUPT_NUMBER)
#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)
/* Per-core context preserved across the soft-off power cycle. */
struct core_state {
uint32_t intenable; /* saved INTENABLE special register */
};
static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};
/**
 * @brief Power down procedure.
 *
 * Locks its code in L1 cache and shuts down memories.
 * NOTE: there's no return from this function.
 *
 * @param disable_lpsram flag if LPSRAM is to be disabled (whole)
 * @param hpsram_pg_mask pointer to memory segments power gating mask
 * (each bit corresponds to one ebb)
 */
extern void power_down_cavs(bool disable_lpsram, uint32_t *hpsram_pg_mask);
/* Translate a pointer into its cached SRAM alias by setting the alias
 * offset bit in the address.
 */
static inline void __sparse_cache *uncache_to_cache(void *address)
{
return (void __sparse_cache *)((uintptr_t)(address) | SRAM_ALIAS_OFFSET);
}
/**
 * @brief Enter the requested power state on the current CPU.
 *
 * Only PM_STATE_SOFT_OFF is supported. The saved INTENABLE value is kept in
 * core_desc[] so pm_state_exit_post_ops() can restore it after wake-up.
 * On CPU 0 this hands control to power_down_cavs(), which does not return;
 * secondary CPUs re-enable their interrupts and park in k_cpu_idle().
 *
 * @param state       requested state; anything but PM_STATE_SOFT_OFF asserts
 * @param substate_id unused
 */
__weak void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		/* Save and mask all interrupts before touching power state. */
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		soc_cpus_active[cpu] = false;
		z_xtensa_cache_flush_inv_all();
		if (cpu == 0) {
			/* turn off all HPSRAM banks - get a full bitmap.
			 * Shift in 64-bit: (1 << 32) on a 32-bit int is
			 * undefined behavior in C; the result here is
			 * 0xFFFFFFFF (one bit per EBB bank).
			 */
			uint32_t hpsram_mask =
				(uint32_t)((1ULL << EBB_BANKS_IN_SEGMENT) - 1);
			/* do power down - this function won't return */
			power_down_cavs(true, uncache_to_cache(&hpsram_mask));
		} else {
			z_xt_ints_on(core_desc[cpu].intenable);
			k_cpu_idle();
		}
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}
/* Handle SOC specific activity after Low Power Mode Exit.
 *
 * Marks the resuming CPU active again, flushes/invalidates caches, and
 * restores the interrupt mask saved by pm_state_set(). Asserts (no-op in
 * release builds) for any state other than PM_STATE_SOFT_OFF.
 */
__weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	uint32_t core_id = arch_proc_id();

	ARG_UNUSED(substate_id);

	if (state != PM_STATE_SOFT_OFF) {
		__ASSERT(false, "invalid argument - unsupported power state");
		return;
	}

	soc_cpus_active[core_id] = true;
	z_xtensa_cache_flush_inv_all();
	z_xt_ints_on(core_desc[core_id].intenable);
}
#endif
__imr void power_init(void)
{

View file

@ -0,0 +1,157 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include "asm_ldo_management.h"
#include "asm_memory_management.h"
/* Host IPC register block used to post the SET_DX reply before sleeping. */
#define IPC_HOST_BASE 0x00071E00
#define IPC_DIPCIDD 0x18
#define IPC_DIPCIDR 0x10
.section .text, "ax"
.align 64
/* Literal pool kept together so it can be locked into the data cache
 * (dpfl below) before the SRAM backing it is powered off.
 */
power_down_literals:
.literal_position
set_dx_reply:
/* BUSY (bit31), MODULE_MSG (bit30), reply (bit29), SET_DX (bit 24-28: 7) */
.word 0xE7000000
sram_dis_loop_cnt:
/* iteration count for the delay spent while LPSRAM powers down */
.word 4096
.global power_down_cavs
.type power_down_cavs, @function
/**
 * Perform power down.
 *
 * Depending on arguments, memories are switched off.
 * NOTE: this function never returns; it ends in waiti + an infinite loop.
 * A2 - bool: power down the (whole) LPSRAM when non-zero
 * A3 - pointer to array containing power gating mask.
 *      Size of array is determined by the MAX_MEMORY_SEGMENTS define
 *      (each bit corresponds to one ebb bank).
 */
#define b_enable_lpsram a2
#define pu32_hpsram_mask a3
#define temp_reg0 a6
#define temp_reg1 a7
#define temp_reg2 a8
#define temp_reg3 a9
#define host_base a10
#define pfl_reg a15
#define MAX_MEMORY_SEGMENTS 2
power_down_cavs:
entry sp, 32
/**
 * Lock this routine's literals, code and the mask array into L1 cache so
 * they stay reachable after SRAM is gated off; effectively executes:
 * xthal_dcache_region_lock(&literals, 128);
 * xthal_icache_region_lock(&power_down_cavs, 256);
 * xthal_dcache_region_lock(&pu32_hpsram_mask, 64);
 */
movi pfl_reg, power_down_literals
dpfl pfl_reg, 0
dpfl pfl_reg, 64
movi pfl_reg, power_down_cavs
ipfl pfl_reg, 0
ipfl pfl_reg, 64
ipfl pfl_reg, 128
ipfl pfl_reg, 192
mov pfl_reg, pu32_hpsram_mask
dpfl pfl_reg, 0
movi host_base, IPC_HOST_BASE
_PD_DISABLE_LPSRAM:
/* effectively executes:
 * if (b_enable_lpsram){
 *   cavs_lpsram_power_down_entire();
 * }
 */
beqz b_enable_lpsram, _PD_DISABLE_HPSRAM
m_cavs_lpsram_power_down_entire temp_reg0, temp_reg1, temp_reg2, sram_dis_loop_cnt
j _PD_DISABLE_HPSRAM
_PD_DISABLE_HPSRAM:
/* Skip HPSRAM gating when no mask was supplied.
 * NOTE(review): beqz tests the POINTER in pu32_hpsram_mask (NULL check),
 * not the value it points to, although the original comment said
 * "if value in memory pointed by pu32_hpsram_mask = 0" — confirm intent.
 */
beqz pu32_hpsram_mask, _PD_SEND_IPC
/* mandatory sequence for LDO ON - effectively executes:
 * m_cavs_set_hpldo_state(SHIM_LDOCTL_HPSRAM_LDO_ON);
 * WAIT_300NS();
 */
movi temp_reg0, SHIM_LDOCTL_HPSRAM_LDO_ON
m_cavs_set_hpldo_state temp_reg0, temp_reg1, temp_reg2
movi temp_reg0, 128
1 :
addi temp_reg0, temp_reg0, -1
bnez temp_reg0, 1b
/* Gate each HPSRAM segment, highest index first; effectively executes:
 * for (size_t seg_index = (MAX_MEMORY_SEGMENTS - 1); seg_index >= 0;
 *     --seg_index) {
 *   cavs_hpsram_power_change(seg_index, mask[seg_index]);
 * }
 * where mask is the array pointed to by pu32_hpsram_mask
 */
.set seg_index, MAX_MEMORY_SEGMENTS - 1
.rept MAX_MEMORY_SEGMENTS
l32i temp_reg0, pu32_hpsram_mask, 4 * seg_index
m_cavs_hpsram_power_change\
/*segment_index=*/ seg_index,\
/*mask=*/ temp_reg0,\
temp_reg1,\
temp_reg2,\
temp_reg3
.set seg_index, seg_index - 1
.endr
/* mandatory sequence for LDO OFF - effectively executes:
 * WAIT_300NS();
 * m_cavs_set_hpldo_state(SHIM_LDOCTL_HPSRAM_LDO_OFF);
 */
movi temp_reg0, 128
1 :
addi temp_reg0, temp_reg0, -1
bnez temp_reg0, 1b
movi temp_reg0, SHIM_LDOCTL_HPSRAM_LDO_OFF
m_cavs_set_hpldo_state temp_reg0, temp_reg1, temp_reg2
_PD_SEND_IPC:
/* Send IPC reply for SET_DX message: clear the extension register, then
 * write the BUSY|reply word to trigger the doorbell.
 */
movi temp_reg1, 0
s32i temp_reg1, host_base, IPC_DIPCIDD
movi temp_reg1, set_dx_reply
l32i temp_reg1, temp_reg1, 0
s32i temp_reg1, host_base, IPC_DIPCIDR
_PD_SLEEP:
/* effectively executes:
 * xmp_spin();
 * waiti 5;      -- wait at interrupt level 5; never resumes past here
 */
movi temp_reg0, 128
loop:
addi temp_reg0, temp_reg0, -1
bnez temp_reg0, loop
extw
extw
waiti 5
1:
j 1b
.size power_down_cavs , . - power_down_cavs