esp32: drivers: interrupt_controller: add interrupt allocation support

Add interrupt allocation support for ESP32.

Signed-off-by: Glauber Maroto Ferreira <glauber.ferreira@espressif.com>
This commit is contained in:
Glauber Maroto Ferreira 2021-04-12 16:13:54 -03:00 committed by Christopher Friedt
parent 54b26ca7e8
commit 9ae5fd1b34
11 changed files with 1328 additions and 0 deletions

View file

@ -242,6 +242,7 @@
/drivers/ieee802154/ieee802154_cc13xx* @bwitherspoon @cfriedt
/drivers/interrupt_controller/ @dcpleung @nashif
/drivers/interrupt_controller/intc_gic.c @stephanosio
/drivers/interrupt_controller/*esp32* @glaubermaroto
/drivers/ipm/ipm_mhu* @karl-zh
/drivers/ipm/Kconfig.nrfx @masz-nordic @ioannisg
/drivers/ipm/Kconfig.nrfx_ipc_channel @masz-nordic @ioannisg

View file

@ -20,3 +20,4 @@ zephyr_sources_ifdef(CONFIG_SWERV_PIC intc_swerv_pic.c)
zephyr_sources_ifdef(CONFIG_NPCX_MIWU intc_miwu.c)
zephyr_sources_ifdef(CONFIG_LEON_IRQMP intc_irqmp.c)
zephyr_sources_ifdef(CONFIG_INTEL_VTD_ICTL intc_intel_vtd.c)
zephyr_sources_ifdef(CONFIG_SOC_ESP32 intc_esp32.c)

View file

@ -68,4 +68,6 @@ source "drivers/interrupt_controller/Kconfig.npcx"
source "drivers/interrupt_controller/Kconfig.intel_vtd"
source "drivers/interrupt_controller/Kconfig.esp32"
endmenu

View file

@ -0,0 +1,14 @@
# ESP32 Interrupt Allocator messages configuration
# Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
# SPDX-License-Identifier: Apache-2.0
config INTC_ESP32_DECISIONS_LOG
bool "Enables ESP32 interrupt allocator logging"
depends on SOC_ESP32
select LOG
help
Enable this option to visualize information on decisions made by the
interrupt allocator. This has no impact on the interrupt allocator usage
but may be valuable for debugging purposes. When enabled, messages are
printed to the serial console.

View file

@ -0,0 +1,897 @@
/*
* Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <soc.h>
#include <drivers/interrupt_controller/intc_esp32.h>
#include "esp_attr.h"
#include <hal/cpu_hal.h>
#include <hal/interrupt_controller_hal.h>
#include <limits.h>
#include <assert.h>
#include "soc/soc.h"
#include <logging/log.h>
LOG_MODULE_REGISTER(esp32_intc, LOG_LEVEL_DBG);
#define ETS_INTERNAL_TIMER0_INTR_NO 6
#define ETS_INTERNAL_TIMER1_INTR_NO 15
#define ETS_INTERNAL_TIMER2_INTR_NO 16
#define ETS_INTERNAL_SW0_INTR_NO 7
#define ETS_INTERNAL_SW1_INTR_NO 29
#define ETS_INTERNAL_PROFILING_INTR_NO 11
#define VECDESC_FL_RESERVED (1 << 0)
#define VECDESC_FL_INIRAM (1 << 1)
#define VECDESC_FL_SHARED (1 << 2)
#define VECDESC_FL_NONSHARED (1 << 3)
/*
* Define this to debug the choices made when allocating the interrupt. This leads to much debugging
* output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog
* being triggered, that is why it is separate from the normal LOG* scheme.
*/
#ifdef CONFIG_INTC_ESP32_DECISIONS_LOG
# define INTC_LOG(...) LOG_INF(__VA_ARGS__)
#else
# define INTC_LOG(...) do {} while (0)
#endif
/* Typedef for C-callable interrupt handler function */
typedef void (*intc_handler_t)(void *);
typedef void (*intc_dyn_handler_t)(const void *);
/* shared critical section context */
static int esp_intc_csec;
/* Enter the driver-wide critical section, remembering the lock key. */
static inline void esp_intr_lock(void)
{
	int key = irq_lock();

	esp_intc_csec = key;
}
/* Leave the critical section entered by esp_intr_lock(). */
static inline void esp_intr_unlock(void)
{
	irq_unlock(esp_intc_csec);
}
/*
* Interrupt handler table and unhandled interrupt routine. Duplicated
* from xtensa_intr.c... it's supposed to be private, but we need to look
* into it in order to see if someone allocated an int using
* set_interrupt_handler.
*/
struct intr_alloc_table_entry {
	void (*handler)(void *arg); /* installed ISR; default_intr_handler when the line is free */
	void *arg;                  /* opaque argument passed to handler */
};
/* Default handler for unhandled interrupts. */
/* Default handler: reports interrupts that fire without a registered ISR. */
void default_intr_handler(void *arg)
{
	int intno = (int)arg;

	printk("Unhandled interrupt %d on cpu %d!\n", intno, arch_curr_cpu()->id);
}
static struct intr_alloc_table_entry intr_alloc_table[ESP_INTC_INTS_NUM * CONFIG_MP_NUM_CPUS];
/*
 * Install @p f with argument @p arg on interrupt line @p n via Zephyr's
 * dynamic IRQ API, then re-enable the line.
 *
 * NOTE(review): intr_alloc_table is sized ESP_INTC_INTS_NUM * CONFIG_MP_NUM_CPUS
 * and intr_has_handler() reads slot (n * CONFIG_MP_NUM_CPUS + cpu), but this
 * function only writes slot (n * CONFIG_MP_NUM_CPUS), i.e. the CPU0 slot.
 * Confirm this is intended for multi-core configurations.
 */
static void set_interrupt_handler(int n, intc_handler_t f, void *arg)
{
	irq_disable(n);
	intr_alloc_table[n * CONFIG_MP_NUM_CPUS].handler = f;
	irq_connect_dynamic(n, n, (intc_dyn_handler_t)f, arg, 0);
	irq_enable(n);
}
/* Linked list of vector descriptions, sorted by cpu.intno value */
static struct vector_desc_t *vector_desc_head; /* implicitly initialized to NULL */
/* This bitmask has an 1 if the int should be disabled when the flash is disabled. */
static uint32_t non_iram_int_mask[CONFIG_MP_NUM_CPUS];
/* This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable. */
static uint32_t non_iram_int_disabled[CONFIG_MP_NUM_CPUS];
static bool non_iram_int_disabled_flag[CONFIG_MP_NUM_CPUS];
/*
* Inserts an item into vector_desc list so that the list is sorted
* with an incrementing cpu.intno value.
*/
/*
 * Insert @p to_insert into the vector_desc list, keeping the list sorted
 * by (cpu, intno) in increasing order.
 */
static void insert_vector_desc(struct vector_desc_t *to_insert)
{
	struct vector_desc_t **link = &vector_desc_head;

	/* Advance to the first node that must come after the new entry. */
	while (*link != NULL) {
		struct vector_desc_t *cur = *link;

		if (cur->cpu > to_insert->cpu ||
		    (cur->cpu == to_insert->cpu && cur->intno >= to_insert->intno)) {
			break;
		}
		link = &cur->next;
	}
	/* Splice in, covering both the head-insert and mid-list cases. */
	to_insert->next = *link;
	*link = to_insert;
}
/* Returns a vector_desc entry for an intno/cpu, or NULL if none exists. */
static struct vector_desc_t *find_desc_for_int(int intno, int cpu)
{
struct vector_desc_t *vd = vector_desc_head;
while (vd != NULL) {
if (vd->cpu == cpu && vd->intno == intno) {
break;
}
vd = vd->next;
}
return vd;
}
/*
* Returns a vector_desc entry for an intno/cpu.
* Either returns a preexisting one or allocates a new one and inserts
* it into the list. Returns NULL on malloc fail.
*/
/*
 * Return the vector_desc for an intno/cpu pair, allocating and inserting a
 * zeroed one when no entry exists yet. Returns NULL on allocation failure.
 */
static struct vector_desc_t *get_desc_for_int(int intno, int cpu)
{
	struct vector_desc_t *vd = find_desc_for_int(intno, cpu);

	if (vd != NULL) {
		return vd;
	}
	vd = k_malloc(sizeof(struct vector_desc_t));
	if (vd == NULL) {
		return NULL;
	}
	memset(vd, 0, sizeof(struct vector_desc_t));
	vd->intno = intno;
	vd->cpu = cpu;
	insert_vector_desc(vd);
	return vd;
}
/*
* Returns a vector_desc entry for a source; the cpu parameter is used
* to tell GPIO_INT and GPIO_NMI from different CPUs
*/
static struct vector_desc_t *find_desc_for_source(int source, int cpu)
{
	/*
	 * Scan the whole descriptor list. Non-shared descriptors match directly
	 * on (source, cpu); shared descriptors match when any of their attached
	 * shared-vector entries carries the requested source.
	 */
	for (struct vector_desc_t *vd = vector_desc_head; vd != NULL; vd = vd->next) {
		if (vd->flags & VECDESC_FL_SHARED) {
			/* check only shared vds for the correct cpu, otherwise skip */
			if (vd->cpu != cpu) {
				continue;
			}
			struct shared_vector_desc_t *svd = vd->shared_vec_info;

			assert(svd != NULL);
			for (; svd != NULL; svd = svd->next) {
				if (svd->source == source) {
					return vd;
				}
			}
		} else if (vd->source == source && vd->cpu == cpu) {
			return vd;
		}
	}
	return NULL;
}
/*
 * Point every per-CPU handler slot at the default handler. The slot index is
 * also stored as the handler argument so unhandled interrupts are identifiable.
 */
void esp_intr_initialize(void)
{
	const size_t total = ESP_INTC_INTS_NUM * CONFIG_MP_NUM_CPUS;

	for (size_t slot = 0; slot < total; slot++) {
		intr_alloc_table[slot].handler = default_intr_handler;
		intr_alloc_table[slot].arg = (void *)slot;
	}
}
/*
 * Mark interrupt @p intno on @p cpu as shareable between ISRs.
 *
 * @param intno interrupt line (0 .. ESP_INTC_INTS_NUM-1)
 * @param cpu target CPU index
 * @param is_int_ram true when shared handlers for this line live in IRAM
 * @return 0 on success, -EINVAL on bad intno/cpu, -ENOMEM on allocation failure
 */
int esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
	/* Reject out-of-range values, including negatives (previously only
	 * the upper bound was checked).
	 */
	if (intno < 0 || intno >= ESP_INTC_INTS_NUM) {
		return -EINVAL;
	}
	if (cpu < 0 || cpu >= CONFIG_MP_NUM_CPUS) {
		return -EINVAL;
	}
	esp_intr_lock();
	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		return -ENOMEM;
	}
	vd->flags = VECDESC_FL_SHARED;
	if (is_int_ram) {
		vd->flags |= VECDESC_FL_INIRAM;
	}
	esp_intr_unlock();
	return 0;
}
/*
 * Reserve interrupt @p intno on @p cpu so the allocator never hands it out.
 *
 * @param intno interrupt line (0 .. ESP_INTC_INTS_NUM-1)
 * @param cpu target CPU index
 * @return 0 on success, -EINVAL on bad intno/cpu, -ENOMEM on allocation failure
 */
int esp_intr_reserve(int intno, int cpu)
{
	/* Reject out-of-range values, including negatives (previously only
	 * the upper bound was checked).
	 */
	if (intno < 0 || intno >= ESP_INTC_INTS_NUM) {
		return -EINVAL;
	}
	if (cpu < 0 || cpu >= CONFIG_MP_NUM_CPUS) {
		return -EINVAL;
	}
	esp_intr_lock();
	struct vector_desc_t *vd = get_desc_for_int(intno, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		return -ENOMEM;
	}
	vd->flags = VECDESC_FL_RESERVED;
	esp_intr_unlock();
	return 0;
}
/* Returns true if handler for interrupt is not the default unhandled interrupt handler */
static bool intr_has_handler(int intr, int cpu)
{
return (intr_alloc_table[intr * CONFIG_MP_NUM_CPUS + cpu].handler != default_intr_handler);
}
/*
 * Decide whether descriptor @p vd can satisfy an allocation request with the
 * given @p flags on @p cpu. @p force is the forced interrupt number, or -1;
 * forced requests may also use INTDESC_SPECIAL lines. The checks are ordered
 * cheapest-first and each failure is logged via INTC_LOG.
 */
static bool is_vect_desc_usable(struct vector_desc_t *vd, int flags, int cpu, int force)
{
	/* Check if interrupt is not reserved by design */
	int x = vd->intno;

	if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu) == INTDESC_RESVD) {
		INTC_LOG("....Unusable: reserved");
		return false;
	}
	if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu) == INTDESC_SPECIAL && force == -1) {
		INTC_LOG("....Unusable: special-purpose int");
		return false;
	}
	/* Check if the interrupt level is acceptable: flags bit N set <=> level N ok */
	if (!(flags & (1 << interrupt_controller_hal_get_level(x)))) {
		INTC_LOG("....Unusable: incompatible level");
		return false;
	}
	/* check if edge/level type matches what we want */
	if (((flags & ESP_INTR_FLAG_EDGE) &&
	     (interrupt_controller_hal_get_type(x) == INTTP_LEVEL)) ||
	    (((!(flags & ESP_INTR_FLAG_EDGE)) &&
	      (interrupt_controller_hal_get_type(x) == INTTP_EDGE)))) {
		INTC_LOG("....Unusable: incompatible trigger type");
		return false;
	}
	/* check if interrupt is reserved at runtime */
	if (vd->flags & VECDESC_FL_RESERVED) {
		INTC_LOG("....Unusable: reserved at runtime.");
		return false;
	}
	/* Ints can't be both shared and non-shared. */
	assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
	/* check if interrupt already is in use by a non-shared interrupt */
	if (vd->flags & VECDESC_FL_NONSHARED) {
		INTC_LOG("....Unusable: already in (non-shared) use.");
		return false;
	}
	/* check shared interrupt flags */
	if (vd->flags & VECDESC_FL_SHARED) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
			bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
			/*
			 * Bail out if int is shared, but iram property
			 * doesn't match what we want.
			 */
			if ((vd->flags & VECDESC_FL_SHARED) &&
			    (desc_in_iram_flag != in_iram_flag)) {
				INTC_LOG("....Unusable: shared but iram prop doesn't match");
				return false;
			}
		} else {
			/*
			 * We need an unshared IRQ; can't use shared ones;
			 * bail out if this is shared.
			 */
			INTC_LOG("...Unusable: int is shared, we need non-shared.");
			return false;
		}
	} else if (intr_has_handler(x, cpu)) {
		/* Check if interrupt already is allocated by set_interrupt_handler */
		INTC_LOG("....Unusable: already allocated");
		return false;
	}
	return true;
}
/*
 * Locate a free interrupt compatible with the flags given.
 * The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
 * When a CPU is forced, the INTDESC_SPECIAL marked interrupts are also accepted.
 *
 * Search order: (1) an existing descriptor already mapped to @p source,
 * (2) the forced interrupt when @p force != -1, (3) a full scan over all
 * ESP_INTC_INTS_NUM lines, preferring lower levels and (for shared ints)
 * the line with the fewest attached ISRs.
 *
 * @return the selected interrupt number, or -1 when nothing qualifies.
 */
static int get_available_int(int flags, int cpu, int force, int source)
{
	int x;
	int best = -1;
	int best_level = 9;
	int best_shared_ct = INT_MAX;
	/* Default vector desc, for vectors not in the linked list */
	struct vector_desc_t empty_vect_desc;

	memset(&empty_vect_desc, 0, sizeof(struct vector_desc_t));
	/* Level defaults to any low/med interrupt */
	if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
		flags |= ESP_INTR_FLAG_LOWMED;
	}
	INTC_LOG("%s: try to find existing. Cpu: %d, Source: %d", __func__, cpu, source);
	struct vector_desc_t *vd = find_desc_for_source(source, cpu);

	if (vd) {
		/* if existing vd found, don't need to search any more. */
		INTC_LOG("%s: existing vd found. intno: %d", __func__, vd->intno);
		/* typo fix: "not matach" -> "not matching" */
		if (force != -1 && force != vd->intno) {
			INTC_LOG("%s: intr forced but not matching existing. "
				 "existing intno: %d, force: %d", __func__, vd->intno, force);
		} else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			INTC_LOG("%s: existing vd invalid.", __func__);
		} else {
			best = vd->intno;
		}
		return best;
	}
	if (force != -1) {
		INTC_LOG("%s: try to find force. "
			 "Cpu: %d, Source: %d, Force: %d", __func__, cpu, source, force);
		/* if force assigned, don't need to search any more. */
		vd = find_desc_for_int(force, cpu);
		if (vd == NULL) {
			/* if existing vd not found, just check the default state for the intr. */
			empty_vect_desc.intno = force;
			vd = &empty_vect_desc;
		}
		if (is_vect_desc_usable(vd, flags, cpu, force)) {
			best = vd->intno;
		} else {
			INTC_LOG("%s: forced vd invalid.", __func__);
		}
		return best;
	}
	INTC_LOG("%s: start looking. Current cpu: %d", __func__, cpu);
	/* No allocated handlers as well as forced intr, iterate over the 32 possible interrupts */
	for (x = 0; x < ESP_INTC_INTS_NUM; x++) {
		/* Grab the vector_desc for this vector. */
		vd = find_desc_for_int(x, cpu);
		if (vd == NULL) {
			empty_vect_desc.intno = x;
			vd = &empty_vect_desc;
		}
		INTC_LOG("Int %d reserved %d level %d %s hasIsr %d",
			 x,
			 interrupt_controller_hal_get_cpu_desc_flags(x, cpu) == INTDESC_RESVD,
			 interrupt_controller_hal_get_level(x),
			 interrupt_controller_hal_get_type(x) == INTTP_LEVEL ? "LEVEL" : "EDGE",
			 intr_has_handler(x, cpu));
		if (!is_vect_desc_usable(vd, flags, cpu, force)) {
			continue;
		}
		if (flags & ESP_INTR_FLAG_SHARED) {
			/* We're allocating a shared int. */
			/* See if int already is used as a shared interrupt. */
			if (vd->flags & VECDESC_FL_SHARED) {
				/*
				 * We can use this already-marked-as-shared interrupt. Count the
				 * already attached isrs in order to see how useful it is.
				 */
				int no = 0;
				struct shared_vector_desc_t *svdesc = vd->shared_vec_info;

				while (svdesc != NULL) {
					no++;
					svdesc = svdesc->next;
				}
				if (no < best_shared_ct ||
				    best_level > interrupt_controller_hal_get_level(x)) {
					/*
					 * Seems like this shared vector is both okay and has
					 * the least amount of ISRs already attached to it.
					 */
					best = x;
					best_shared_ct = no;
					best_level = interrupt_controller_hal_get_level(x);
					INTC_LOG("...int %d more usable as a shared int: "
						 "has %d existing vectors", x, no);
				} else {
					INTC_LOG("...worse than int %d", best);
				}
			} else {
				if (best == -1) {
					/*
					 * We haven't found a feasible shared interrupt yet.
					 * This one is still free and usable, even if not
					 * marked as shared.
					 * Remember it in case we don't find any other shared
					 * interrupt that qualifies.
					 */
					if (best_level > interrupt_controller_hal_get_level(x)) {
						best = x;
						best_level = interrupt_controller_hal_get_level(x);
						INTC_LOG("...int %d usable as new shared int", x);
					}
				} else {
					INTC_LOG("...already have a shared int");
				}
			}
		} else {
			/*
			 * Seems this interrupt is feasible. Select it and break out of the loop
			 * No need to search further.
			 */
			if (best_level > interrupt_controller_hal_get_level(x)) {
				best = x;
				best_level = interrupt_controller_hal_get_level(x);
			} else {
				INTC_LOG("...worse than int %d", best);
			}
		}
	}
	INTC_LOG("%s: using int %d", __func__, best);
	/*
	 * By now we have looked at all potential interrupts and
	 * hopefully have selected the best one in best.
	 */
	return best;
}
/* Common shared-interrupt dispatcher: chain-calls every attached ISR. */
static void IRAM_ATTR shared_intr_isr(void *arg)
{
	struct vector_desc_t *vd = (struct vector_desc_t *)arg;

	esp_intr_lock();
	for (struct shared_vector_desc_t *sh_vec = vd->shared_vec_info;
	     sh_vec != NULL; sh_vec = sh_vec->next) {
		if (sh_vec->disabled) {
			continue;
		}
		/* Run the ISR only when no status register was supplied, or the
		 * masked status bits show this source is pending.
		 */
		if (!(sh_vec->statusreg) || (*sh_vec->statusreg & sh_vec->statusmask)) {
			sh_vec->isr(sh_vec->arg);
		}
	}
	esp_intr_unlock();
}
/*
 * Allocate an interrupt for @p source on the calling CPU, with optional
 * status-register filtering for shared interrupts (see header for the full
 * contract). Returns 0 on success, -EINVAL on invalid argument combinations,
 * -ENOMEM on allocation failure and -ENODEV when no free matching line exists.
 */
int esp_intr_alloc_intrstatus(int source,
			      int flags,
			      uint32_t intrstatusreg,
			      uint32_t intrstatusmask,
			      intr_handler_t handler,
			      void *arg,
			      struct intr_handle_data_t **ret_handle)
{
	struct intr_handle_data_t *ret = NULL;
	int force = -1;

	LOG_INF("%s (cpu %d): checking args", __func__, arch_curr_cpu()->id);
	/* Shared interrupts should be level-triggered. */
	if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
		return -EINVAL;
	}
	/* You can't set an handler / arg for a non-C-callable interrupt. */
	if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
		return -EINVAL;
	}
	/* Shared ints should have handler and non-processor-local source */
	if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) {
		return -EINVAL;
	}
	/* Statusreg should have a mask */
	if (intrstatusreg && !intrstatusmask) {
		return -EINVAL;
	}
	/*
	 * If the ISR is marked to be IRAM-resident, the handler must not be in the cached region.
	 * If we are to allow placing interrupt handlers into the 0x400C0000 - 0x400C2000 region,
	 * we need to make sure the interrupt is connected to the CPU0.
	 * CPU1 does not have access to the RTC fast memory through this region.
	 */
	if ((flags & ESP_INTR_FLAG_IRAM) &&
	    (ptrdiff_t) handler >= SOC_RTC_IRAM_HIGH &&
	    (ptrdiff_t) handler < SOC_RTC_DATA_LOW) {
		return -EINVAL;
	}
	/*
	 * Default to prio 1 for shared interrupts.
	 * Default to prio 1, 2 or 3 for non-shared interrupts.
	 */
	if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
		if (flags & ESP_INTR_FLAG_SHARED) {
			flags |= ESP_INTR_FLAG_LEVEL1;
		} else {
			flags |= ESP_INTR_FLAG_LOWMED;
		}
	}
	LOG_INF("%s (cpu %d): Args okay."
		"Resulting flags 0x%X", __func__, arch_curr_cpu()->id, flags);
	/*
	 * Check 'special' interrupt sources. These are tied to one specific
	 * interrupt, so we have to force get_available_int to only look at that.
	 */
	switch (source) {
	case ETS_INTERNAL_TIMER0_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER0_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER1_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER1_INTR_NO;
		break;
	case ETS_INTERNAL_TIMER2_INTR_SOURCE:
		force = ETS_INTERNAL_TIMER2_INTR_NO;
		break;
	case ETS_INTERNAL_SW0_INTR_SOURCE:
		force = ETS_INTERNAL_SW0_INTR_NO;
		break;
	case ETS_INTERNAL_SW1_INTR_SOURCE:
		force = ETS_INTERNAL_SW1_INTR_NO;
		break;
	case ETS_INTERNAL_PROFILING_INTR_SOURCE:
		force = ETS_INTERNAL_PROFILING_INTR_NO;
		break;
	default:
		break;
	}
	/* Allocate a return handle. If we end up not needing it, we'll free it later on. */
	ret = k_malloc(sizeof(struct intr_handle_data_t));
	if (ret == NULL) {
		return -ENOMEM;
	}
	esp_intr_lock();
	int cpu = arch_curr_cpu()->id;
	/* See if we can find an interrupt that matches the flags. */
	int intr = get_available_int(flags, cpu, force, source);

	if (intr == -1) {
		/* None found. Bail out. */
		esp_intr_unlock();
		k_free(ret);
		return -ENODEV;
	}
	/* Get an int vector desc for int. */
	struct vector_desc_t *vd = get_desc_for_int(intr, cpu);

	if (vd == NULL) {
		esp_intr_unlock();
		k_free(ret);
		return -ENOMEM;
	}
	/* Allocate that int! */
	if (flags & ESP_INTR_FLAG_SHARED) {
		/* Populate vector entry and add to linked list. */
		struct shared_vector_desc_t *sv = k_malloc(sizeof(struct shared_vector_desc_t));

		if (sv == NULL) {
			esp_intr_unlock();
			k_free(ret);
			return -ENOMEM;
		}
		memset(sv, 0, sizeof(struct shared_vector_desc_t));
		sv->statusreg = (uint32_t *)intrstatusreg;
		sv->statusmask = intrstatusmask;
		sv->isr = handler;
		sv->arg = arg;
		sv->next = vd->shared_vec_info;
		sv->source = source;
		sv->disabled = 0;
		vd->shared_vec_info = sv;
		vd->flags |= VECDESC_FL_SHARED;
		/* (Re-)set shared isr handler to new value. */
		set_interrupt_handler(intr, shared_intr_isr, vd);
	} else {
		/* Mark as unusable for other interrupt sources. This is ours now! */
		vd->flags = VECDESC_FL_NONSHARED;
		if (handler) {
			set_interrupt_handler(intr, handler, arg);
		}
		if (flags & ESP_INTR_FLAG_EDGE) {
			/* Clear any latched edge before handing the line over. */
			xthal_set_intclear(1 << intr);
		}
		vd->source = source;
	}
	/* Track whether this line must be masked while flash cache is off. */
	if (flags & ESP_INTR_FLAG_IRAM) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] &= ~(1 << intr);
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[cpu] |= (1 << intr);
	}
	/* Route the mux source to the chosen line (internal sources are < 0). */
	if (source >= 0) {
		intr_matrix_set(cpu, source, intr);
	}
	/* Fill return handle data. */
	ret->vector_desc = vd;
	ret->shared_vector_desc = vd->shared_vec_info;
	/* Enable int at CPU-level; */
	irq_enable(intr);
	/*
	 * If interrupt has to be started disabled, do that now; ints won't be enabled for
	 * real until the end of the critical section.
	 */
	if (flags & ESP_INTR_FLAG_INTRDISABLED) {
		esp_intr_disable(ret);
	}
	esp_intr_unlock();
	/* Fill return handle if needed, otherwise free handle. */
	if (ret_handle != NULL) {
		*ret_handle = ret;
	} else {
		k_free(ret);
	}
	LOG_DBG("Connected src %d to int %d (cpu %d)", source, intr, cpu);
	return 0;
}
/*
 * Convenience wrapper around esp_intr_alloc_intrstatus() with no status
 * register/mask, i.e. shared handlers are always invoked. See the header
 * for the full parameter and return-value contract.
 */
int esp_intr_alloc(int source,
		   int flags,
		   intr_handler_t handler,
		   void *arg,
		   struct intr_handle_data_t **ret_handle)
{
	/*
	 * As an optimization, we can create a table with the possible interrupt status
	 * registers and masks for every single source there is. We can then add code here to
	 * look up an applicable value and pass that to the esp_intr_alloc_intrstatus function.
	 */
	return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle);
}
/*
 * Change the IRAM residency of a non-shared interrupt after allocation,
 * updating both the descriptor flag and the per-CPU non-IRAM mask.
 * Shared interrupts are rejected with -EINVAL.
 */
int IRAM_ATTR esp_intr_set_in_iram(struct intr_handle_data_t *handle, bool is_in_iram)
{
	if (handle == NULL) {
		return -EINVAL;
	}
	struct vector_desc_t *vd = handle->vector_desc;

	if (vd->flags & VECDESC_FL_SHARED) {
		return -EINVAL;
	}
	esp_intr_lock();
	uint32_t bit = (1 << vd->intno);

	if (is_in_iram) {
		vd->flags |= VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] &= ~bit;
	} else {
		vd->flags &= ~VECDESC_FL_INIRAM;
		non_iram_int_mask[vd->cpu] |= bit;
	}
	esp_intr_unlock();
	return 0;
}
/*
 * Release an interrupt previously obtained through esp_intr_alloc*().
 * For shared interrupts only the matching shared-vector entry is removed;
 * the interrupt line itself is released once no entries remain.
 *
 * @param handle handle returned by esp_intr_alloc*() (freed on success)
 * @return -EINVAL when handle is NULL, 0 otherwise
 */
int esp_intr_free(struct intr_handle_data_t *handle)
{
	bool free_shared_vector = false;

	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	esp_intr_disable(handle);
	if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
		/* Find and kill the shared int */
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
		struct shared_vector_desc_t *prevsvd = NULL;

		assert(svd); /* should be something in there for a shared int */
		while (svd != NULL) {
			if (svd == handle->shared_vector_desc) {
				/* Found it. Now kill it. */
				if (prevsvd) {
					prevsvd->next = svd->next;
				} else {
					handle->vector_desc->shared_vec_info = svd->next;
				}
				k_free(svd);
				break;
			}
			prevsvd = svd;
			svd = svd->next;
		}
		/* If nothing left, disable interrupt. */
		if (handle->vector_desc->shared_vec_info == NULL) {
			free_shared_vector = true;
		}
		LOG_INF("%s: Deleting shared int: %s. "
			"Shared int is %s", __func__, svd ? "not found or last one" : "deleted",
			free_shared_vector ? "empty now." : "still in use");
	}
	if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
		LOG_INF("%s: Disabling int, killing handler", __func__);
		/* Reset to normal handler */
		set_interrupt_handler(handle->vector_desc->intno,
				      default_intr_handler,
				      (void *)((int)handle->vector_desc->intno));
		/*
		 * Theoretically, we could free the vector_desc... not sure if that's worth the
		 * few bytes of memory we save.(We can also not use the same exit path for empty
		 * shared ints anymore if we delete the desc.) For now, just mark it as free.
		 *
		 * BUGFIX: use bitwise NOT (~), not logical NOT (!). "!(...)" evaluates
		 * to 0, which cleared ALL flags (including VECDESC_FL_SHARED and
		 * VECDESC_FL_INIRAM) instead of only the two intended bits.
		 */
		handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED | VECDESC_FL_RESERVED);
		/* Also kill non_iram mask bit. */
		non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno));
	}
	esp_intr_unlock();
	k_free(handle);
	return 0;
}
int esp_intr_get_intno(struct intr_handle_data_t *handle)
{
return handle->vector_desc->intno;
}
int esp_intr_get_cpu(struct intr_handle_data_t *handle)
{
return handle->vector_desc->cpu;
}
/**
* Interrupt disabling strategy:
* If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a
* non-connected interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we
* disable it through the per-CPU interrupt-enable register instead.
* Muxing allows us to disable an int from the other core. It also allows
* disabling shared interrupts.
*/
/*
* Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29
causes the interrupt to effectively be disabled.
*/
#define INT_MUX_DISABLED_INTNO 6
/*
 * Enable the interrupt referenced by @p handle.
 * Muxed sources (source >= 0) are re-connected through the interrupt matrix
 * and may be enabled from either core; internal (source < 0) interrupts can
 * only be enabled from the CPU they belong to.
 *
 * @return 0 on success, -EINVAL on NULL handle or cross-CPU enable of an
 *         internal interrupt.
 */
int IRAM_ATTR esp_intr_enable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;

	if (handle->shared_vector_desc) {
		handle->shared_vector_desc->disabled = 0;
		source = handle->shared_vector_desc->source;
	} else {
		source = handle->vector_desc->source;
	}
	if (source >= 0) {
		/* Disabled using int matrix; re-connect to enable */
		intr_matrix_set(handle->vector_desc->cpu, source, handle->vector_desc->intno);
	} else {
		/* Re-enable using cpu int ena reg */
		if (handle->vector_desc->cpu != arch_curr_cpu()->id) {
			/*
			 * BUGFIX: release the critical section on this error path;
			 * previously the lock key was leaked (esp_intr_disable()
			 * already unlocked here).
			 */
			esp_intr_unlock();
			return -EINVAL; /* Can only enable these ints on this cpu */
		}
		irq_enable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}
/*
 * Disable the interrupt referenced by @p handle.
 * A shared entry is only masked at the matrix level once every sibling entry
 * sharing the same source is disabled; internal (source < 0) interrupts are
 * masked via the per-CPU enable register and only from their own CPU.
 *
 * @return 0 on success, -EINVAL on NULL handle or cross-CPU disable of an
 *         internal interrupt.
 */
int IRAM_ATTR esp_intr_disable(struct intr_handle_data_t *handle)
{
	if (!handle) {
		return -EINVAL;
	}
	esp_intr_lock();
	int source;
	bool disabled = 1;

	if (handle->shared_vector_desc) {
		/* Mark this entry disabled first, then check whether any entry
		 * with the same source is still enabled (ordering matters:
		 * the scan below must see the updated state).
		 */
		handle->shared_vector_desc->disabled = 1;
		source = handle->shared_vector_desc->source;
		struct shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;

		assert(svd != NULL);
		while (svd) {
			if (svd->source == source && svd->disabled == 0) {
				disabled = 0;
				break;
			}
			svd = svd->next;
		}
	} else {
		source = handle->vector_desc->source;
	}
	if (source >= 0) {
		if (disabled) {
			/* Disable using int matrix */
			intr_matrix_set(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
		}
	} else {
		/* Disable using per-cpu regs */
		if (handle->vector_desc->cpu != arch_curr_cpu()->id) {
			esp_intr_unlock();
			return -EINVAL; /* Can only enable these ints on this cpu */
		}
		irq_disable(handle->vector_desc->intno);
	}
	esp_intr_unlock();
	return 0;
}
/*
 * Mask every interrupt on the current CPU that is NOT IRAM-safe (used while
 * the flash cache is disabled). Calling this twice without a matching
 * esp_intr_noniram_enable() aborts.
 */
void IRAM_ATTR esp_intr_noniram_disable(void)
{
	int core = arch_curr_cpu()->id;
	int keep_mask = ~non_iram_int_mask[core];
	int prev_state;

	if (non_iram_int_disabled_flag[core]) {
		abort();
	}
	non_iram_int_disabled_flag[core] = true;
	prev_state = interrupt_controller_hal_disable_int_mask(keep_mask);
	/* Remember which non-IRAM ints were actually enabled, to restore later. */
	non_iram_int_disabled[core] = prev_state & non_iram_int_mask[core];
}
/*
 * Re-enable the interrupts masked by esp_intr_noniram_disable() on the
 * current CPU. Calling this without a prior disable aborts.
 */
void IRAM_ATTR esp_intr_noniram_enable(void)
{
	int core = arch_curr_cpu()->id;
	int restore_mask = non_iram_int_disabled[core];

	if (!non_iram_int_disabled_flag[core]) {
		abort();
	}
	non_iram_int_disabled_flag[core] = false;
	interrupt_controller_hal_enable_int_mask(restore_mask);
}

View file

@ -0,0 +1,14 @@
# Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
# SPDX-License-Identifier: Apache-2.0
description: ESP32 Interrupt controller
compatible: "espressif,esp32-intc"
include: [interrupt-controller.yaml, base.yaml]
properties:
reg:
required: true
interrupt-cells:
- irq

View file

@ -8,6 +8,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/i2c/i2c.h>
#include <dt-bindings/clock/esp32_clock.h>
#include <dt-bindings/interrupt-controller/esp-xtensa-intmux.h>
/ {
chosen {
@ -46,6 +47,15 @@
reg = <0x3FFB0000 0x50000>;
};
intc: interrupt-controller@3ff00104 {
#interrupt-cells = <1>;
compatible = "espressif,esp32-intc";
interrupt-controller;
reg = <0x3ff00104 0x114>;
label = "INTC_0";
status = "okay";
};
rtc: rtc@3ff48000 {
compatible = "espressif,esp32-rtc";
reg = <0x3ff48000 0x0D8>;

View file

@ -0,0 +1,304 @@
/*
* Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_DRIVERS_ESP_INTR_ALLOC_H__
#define ZEPHYR_INCLUDE_DRIVERS_ESP_INTR_ALLOC_H__
#include <stdint.h>
#include <stdbool.h>
/* number of possible interrupts per core */
#define ESP_INTC_INTS_NUM (32)
/*
* Interrupt allocation flags - These flags can be used to specify
* which interrupt qualities the code calling esp_intr_alloc* needs.
*
*/
/* Keep the LEVELx values as they are here; they match up with (1<<level) */
#define ESP_INTR_FLAG_LEVEL1 (1<<1) /* Accept a Level 1 int vector, lowest priority */
#define ESP_INTR_FLAG_LEVEL2 (1<<2) /* Accept a Level 2 int vector */
#define ESP_INTR_FLAG_LEVEL3 (1<<3) /* Accept a Level 3 int vector */
#define ESP_INTR_FLAG_LEVEL4 (1<<4) /* Accept a Level 4 int vector */
#define ESP_INTR_FLAG_LEVEL5 (1<<5) /* Accept a Level 5 int vector */
#define ESP_INTR_FLAG_LEVEL6 (1<<6) /* Accept a Level 6 int vector */
#define ESP_INTR_FLAG_NMI (1<<7) /* Accept a Level 7 int vector, highest priority */
#define ESP_INTR_FLAG_SHARED (1<<8) /* Interrupt can be shared between ISRs */
#define ESP_INTR_FLAG_EDGE (1<<9) /* Edge-triggered interrupt */
#define ESP_INTR_FLAG_IRAM (1<<10) /* ISR can be called if cache is disabled */
#define ESP_INTR_FLAG_INTRDISABLED (1<<11) /* Return with this interrupt disabled */
/* Low and medium prio interrupts. These can be handled in C. */
#define ESP_INTR_FLAG_LOWMED (ESP_INTR_FLAG_LEVEL1|ESP_INTR_FLAG_LEVEL2|ESP_INTR_FLAG_LEVEL3)
/* High level interrupts. Need to be handled in assembly. */
#define ESP_INTR_FLAG_HIGH (ESP_INTR_FLAG_LEVEL4|ESP_INTR_FLAG_LEVEL5|ESP_INTR_FLAG_LEVEL6| \
ESP_INTR_FLAG_NMI)
/* Mask for all level flags */
#define ESP_INTR_FLAG_LEVELMASK (ESP_INTR_FLAG_LEVEL1|ESP_INTR_FLAG_LEVEL2|ESP_INTR_FLAG_LEVEL3| \
ESP_INTR_FLAG_LEVEL4|ESP_INTR_FLAG_LEVEL5|ESP_INTR_FLAG_LEVEL6| \
ESP_INTR_FLAG_NMI)
/*
* The esp_intr_alloc* functions can allocate an int for all *_INTR_SOURCE int sources that
* are routed through the interrupt mux. Apart from these sources, each core also has some internal
* sources that do not pass through the interrupt mux. To allocate an interrupt for these sources,
* pass these pseudo-sources to the functions.
*/
#define ETS_INTERNAL_TIMER0_INTR_SOURCE -1 /* Xtensa timer 0 interrupt source */
#define ETS_INTERNAL_TIMER1_INTR_SOURCE -2 /* Xtensa timer 1 interrupt source */
#define ETS_INTERNAL_TIMER2_INTR_SOURCE -3 /* Xtensa timer 2 interrupt source */
#define ETS_INTERNAL_SW0_INTR_SOURCE -4 /* Software int source 1 */
#define ETS_INTERNAL_SW1_INTR_SOURCE -5 /* Software int source 2 */
#define ETS_INTERNAL_PROFILING_INTR_SOURCE -6 /* Int source for profiling */
/* Function prototype for interrupt handler function */
typedef void (*intr_handler_t)(void *arg);
/*
 * One handler attached to a shared interrupt line.
 * NOTE(review): 'disabled' is a signed 1-bit bitfield, so it stores 0/-1;
 * the driver only ever compares it against 0 — consider making it unsigned.
 */
struct shared_vector_desc_t {
	int disabled : 1;             /* non-zero while this entry is disabled */
	int source : 8;               /* interrupt-mux source number for this entry */
	volatile uint32_t *statusreg; /* optional status register; NULL = always call isr */
	uint32_t statusmask;          /* bits of *statusreg that indicate "pending" */
	intr_handler_t isr;           /* handler to invoke */
	void *arg;                    /* opaque argument passed to isr */
	struct shared_vector_desc_t *next; /* next handler on the same line */
};
/* Pack using bitfields for better memory use */
struct vector_desc_t {
	int flags : 16;        /* OR of VECDESC_FLAG_* defines */
	unsigned int cpu : 1;  /* CPU this interrupt line belongs to */
	unsigned int intno : 5; /* CPU interrupt number (0-31) */
	int source : 8;        /* interrupt-mux source number, used when not shared */
	struct shared_vector_desc_t *shared_vec_info; /* used when VECDESC_FL_SHARED */
	struct vector_desc_t *next; /* next descriptor, list sorted by (cpu, intno) */
};
/** Interrupt handler associated data structure */
struct intr_handle_data_t {
	struct vector_desc_t *vector_desc;              /* descriptor of the allocated line */
	struct shared_vector_desc_t *shared_vector_desc; /* this handle's shared entry; NULL when non-shared */
};
/**
* @brief Initializes interrupt table to its defaults
*/
void esp_intr_initialize(void);
/**
* @brief Mark an interrupt as a shared interrupt
*
* This will mark a certain interrupt on the specified CPU as
* an interrupt that can be used to hook shared interrupt handlers
* to.
*
* @param intno The number of the interrupt (0-31)
* @param cpu CPU on which the interrupt should be marked as shared (0 or 1)
* @param is_in_iram Shared interrupt is for handlers that reside in IRAM and
* the int can be left enabled while the flash cache is disabled.
*
* @return -EINVAL if cpu or intno is invalid
* 0 otherwise
*/
int esp_intr_mark_shared(int intno, int cpu, bool is_in_iram);
/**
* @brief Reserve an interrupt to be used outside of this framework
*
* This will mark a certain interrupt on the specified CPU as
* reserved, not to be allocated for any reason.
*
* @param intno The number of the interrupt (0-31)
* @param cpu CPU on which the interrupt should be marked as reserved (0 or 1)
*
* @return -EINVAL if cpu or intno is invalid
* 0 otherwise
*/
int esp_intr_reserve(int intno, int cpu);
/**
* @brief Allocate an interrupt with the given parameters.
*
* This finds an interrupt that matches the restrictions as given in the flags
* parameter, maps the given interrupt source to it and hooks up the given
* interrupt handler (with optional argument) as well. If needed, it can return
* a handle for the interrupt as well.
*
* The interrupt will always be allocated on the core that runs this function.
*
* If ESP_INTR_FLAG_IRAM flag is used, and handler address is not in IRAM or
* RTC_FAST_MEM, then -EINVAL is returned.
*
* @param source The interrupt source. One of the *_INTR_SOURCE interrupt mux
* sources, as defined in esp-xtensa-intmux.h, or one of the internal
* ETS_INTERNAL_*_INTR_SOURCE sources as defined in this header.
* @param flags An ORred mask of the ESP_INTR_FLAG_* defines. These restrict the
* choice of interrupts that this routine can choose from. If this value
* is 0, it will default to allocating a non-shared interrupt of level
* 1, 2 or 3. If this is ESP_INTR_FLAG_SHARED, it will allocate a shared
* interrupt of level 1. Setting ESP_INTR_FLAG_INTRDISABLED will return
* from this function with the interrupt disabled.
* @param handler The interrupt handler. Must be NULL when an interrupt of level >3
* is requested, because these types of interrupts aren't C-callable.
* @param arg Optional argument for passed to the interrupt handler
* @param ret_handle Pointer to a struct intr_handle_data_t pointer to store a handle that can
* later be used to request details or free the interrupt. Can be NULL if no handle
* is required.
*
* @return -EINVAL if the combination of arguments is invalid.
* -ENODEV No free interrupt found with the specified flags
* 0 otherwise
*/
int esp_intr_alloc(int source,
int flags,
intr_handler_t handler,
void *arg,
struct intr_handle_data_t **ret_handle);
/**
* @brief Allocate an interrupt with the given parameters.
*
*
* This essentially does the same as esp_intr_alloc, but allows specifying a register and mask
* combo. For shared interrupts, the handler is only called if a read from the specified
* register, ANDed with the mask, returns non-zero. By passing an interrupt status register
* address and a fitting mask, this can be used to accelerate interrupt handling in the case
* a shared interrupt is triggered; by checking the interrupt statuses first, the code can
 * decide which ISRs can be skipped.
*
* @param source The interrupt source. One of the *_INTR_SOURCE interrupt mux
* sources, as defined in esp-xtensa-intmux.h, or one of the internal
* ETS_INTERNAL_*_INTR_SOURCE sources as defined in this header.
* @param flags An ORred mask of the ESP_INTR_FLAG_* defines. These restrict the
* choice of interrupts that this routine can choose from. If this value
* is 0, it will default to allocating a non-shared interrupt of level
* 1, 2 or 3. If this is ESP_INTR_FLAG_SHARED, it will allocate a shared
* interrupt of level 1. Setting ESP_INTR_FLAG_INTRDISABLED will return
* from this function with the interrupt disabled.
* @param intrstatusreg The address of an interrupt status register
* @param intrstatusmask A mask. If a read of address intrstatusreg has any of the bits
* that are 1 in the mask set, the ISR will be called. If not, it will be
* skipped.
* @param handler The interrupt handler. Must be NULL when an interrupt of level >3
* is requested, because these types of interrupts aren't C-callable.
 * @param arg Optional argument passed to the interrupt handler
* @param ret_handle Pointer to a struct intr_handle_data_t pointer to store a handle that can
* later be used to request details or free the interrupt. Can be NULL if no handle
* is required.
*
* @return -EINVAL if the combination of arguments is invalid.
* -ENODEV No free interrupt found with the specified flags
* 0 otherwise
*/
int esp_intr_alloc_intrstatus(int source,
int flags,
uint32_t intrstatusreg,
uint32_t intrstatusmask,
intr_handler_t handler,
void *arg,
struct intr_handle_data_t **ret_handle);
/**
* @brief Disable and free an interrupt.
*
* Use an interrupt handle to disable the interrupt and release the resources associated with it.
 * If the current core is not the core that registered this interrupt, the free operation
 * is delegated to the core that allocated this interrupt, blocking until the resource
 * is successfully released.
*
* @note
* When the handler shares its source with other handlers, the interrupt status bits
* it's responsible for should be managed properly before freeing it. See ``esp_intr_disable``
* for more details. Please do not call this function in ``esp_ipc_call_blocking``.
*
* @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus
*
* @return -EINVAL the handle is NULL
* 0 otherwise
*/
int esp_intr_free(struct intr_handle_data_t *handle);
/**
* @brief Get CPU number an interrupt is tied to
*
* @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus
*
* @return The core number where the interrupt is allocated
*/
int esp_intr_get_cpu(struct intr_handle_data_t *handle);
/**
* @brief Get the allocated interrupt for a certain handle
*
* @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus
*
* @return The interrupt number
*/
int esp_intr_get_intno(struct intr_handle_data_t *handle);
/**
* @brief Disable the interrupt associated with the handle
*
* @note
* 1. For local interrupts (ESP_INTERNAL_* sources), this function has to be called on the
* CPU the interrupt is allocated on. Other interrupts have no such restriction.
 * 2. When several handlers share the same interrupt source, the interrupt status bits
 * handled by the handler being disabled should be masked before disabling it, or be
 * handled properly by other enabled interrupts. Failing to handle the interrupt status
 * will cause endless interrupt invocations and, ultimately, a system crash.
*
* @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus
*
* @return -EINVAL if the combination of arguments is invalid.
* 0 otherwise
*/
int esp_intr_disable(struct intr_handle_data_t *handle);
/**
* @brief Enable the interrupt associated with the handle
*
* @note For local interrupts (ESP_INTERNAL_* sources), this function has to be called on the
* CPU the interrupt is allocated on. Other interrupts have no such restriction.
*
* @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus
*
* @return -EINVAL if the combination of arguments is invalid.
* 0 otherwise
*/
int esp_intr_enable(struct intr_handle_data_t *handle);
/**
* @brief Set the "in IRAM" status of the handler.
*
* @note Does not work on shared interrupts.
*
* @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus
* @param is_in_iram Whether the handler associated with this handle resides in IRAM.
* Handlers residing in IRAM can be called when cache is disabled.
*
* @return -EINVAL if the combination of arguments is invalid.
* 0 otherwise
*/
int esp_intr_set_in_iram(struct intr_handle_data_t *handle, bool is_in_iram);
/**
* @brief Disable interrupts that aren't specifically marked as running from IRAM
*/
void esp_intr_noniram_disable(void);
/**
* @brief Re-enable interrupts disabled by esp_intr_noniram_disable
*/
void esp_intr_noniram_enable(void);
#endif

View file

@ -0,0 +1,82 @@
/*
* Copyright (c) 2021 Espressif Systems (Shanghai) Co., Ltd.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_DT_BINDINGS_INTERRUPT_CONTROLLER_XTENSA_INTMUX_H_
#define ZEPHYR_INCLUDE_DT_BINDINGS_INTERRUPT_CONTROLLER_XTENSA_INTMUX_H_
#define WIFI_MAC_INTR_SOURCE 0 /* WiFi MAC, level */
#define WIFI_MAC_NMI_SOURCE 1 /* WiFi MAC, NMI, use if MAC needs fix in NMI */
#define WIFI_BB_INTR_SOURCE 2 /* WiFi BB, level, we can do some calibration */
#define BT_MAC_INTR_SOURCE 3 /* will be cancelled */
#define BT_BB_INTR_SOURCE 4 /* BB, level */
#define BT_BB_NMI_SOURCE 5 /* BT BB, NMI, use if BB have bug to fix in NMI */
#define RWBT_INTR_SOURCE 6 /* RWBT, level */
#define RWBLE_INTR_SOURCE 7 /* RWBLE, level */
#define RWBT_NMI_SOURCE 8 /* RWBT, NMI, use if RWBT has bug to fix in NMI */
#define RWBLE_NMI_SOURCE 9 /* RWBLE, NMI, use if RWBT has bug to fix in NMI */
#define SLC0_INTR_SOURCE 10 /* SLC0, level */
#define SLC1_INTR_SOURCE 11 /* SLC1, level */
#define UHCI0_INTR_SOURCE 12 /* UHCI0, level */
#define UHCI1_INTR_SOURCE 13 /* UHCI1, level */
#define TG0_T0_LEVEL_INTR_SOURCE 14 /* TIMER_GROUP0, TIMER0, level */
#define TG0_T1_LEVEL_INTR_SOURCE 15 /* TIMER_GROUP0, TIMER1, level */
#define TG0_WDT_LEVEL_INTR_SOURCE 16 /* TIMER_GROUP0, WATCHDOG, level */
#define TG0_LACT_LEVEL_INTR_SOURCE 17 /* TIMER_GROUP0, LACT, level */
#define TG1_T0_LEVEL_INTR_SOURCE 18 /* TIMER_GROUP1, TIMER0, level */
#define TG1_T1_LEVEL_INTR_SOURCE 19 /* TIMER_GROUP1, TIMER1, level */
#define TG1_WDT_LEVEL_INTR_SOURCE 20 /* TIMER_GROUP1, WATCHDOG, level */
#define TG1_LACT_LEVEL_INTR_SOURCE 21 /* TIMER_GROUP1, LACT, level */
#define GPIO_INTR_SOURCE 22 /* interrupt of GPIO, level */
#define GPIO_NMI_SOURCE 23 /* interrupt of GPIO, NMI */
#define FROM_CPU_INTR0_SOURCE 24 /* int0 from a CPU, level */
#define FROM_CPU_INTR1_SOURCE 25 /* int1 from a CPU, level */
#define FROM_CPU_INTR2_SOURCE 26 /* int2 from a CPU, level, for DPORT Access */
#define FROM_CPU_INTR3_SOURCE 27 /* int3 from a CPU, level, for DPORT Access */
#define SPI0_INTR_SOURCE 28 /* SPI0, level, for $ Access, do not use this */
#define SPI1_INTR_SOURCE 29 /* SPI1, level, flash r/w, do not use this */
#define SPI2_INTR_SOURCE 30 /* SPI2, level */
#define SPI3_INTR_SOURCE 31 /* SPI3, level */
#define I2S0_INTR_SOURCE 32 /* I2S0, level */
#define I2S1_INTR_SOURCE 33 /* I2S1, level */
#define UART0_INTR_SOURCE 34 /* UART0, level */
#define UART1_INTR_SOURCE 35 /* UART1, level */
#define UART2_INTR_SOURCE 36 /* UART2, level */
#define SDIO_HOST_INTR_SOURCE 37 /* SD/SDIO/MMC HOST, level */
#define ETH_MAC_INTR_SOURCE 38 /* ethernet mac, level */
#define PWM0_INTR_SOURCE 39 /* PWM0, level, Reserved */
#define PWM1_INTR_SOURCE 40 /* PWM1, level, Reserved */
#define PWM2_INTR_SOURCE 41 /* PWM2, level */
#define PWM3_INTR_SOURCE 42 /* PWM3, level */
#define LEDC_INTR_SOURCE 43 /* LED PWM, level */
#define EFUSE_INTR_SOURCE 44 /* efuse, level, not likely to use */
#define TWAI_INTR_SOURCE 45 /* twai, level */
#define CAN_INTR_SOURCE TWAI_INTR_SOURCE
#define RTC_CORE_INTR_SOURCE 46 /* rtc core, level, include rtc watchdog */
#define RMT_INTR_SOURCE 47 /* remote controller, level */
#define PCNT_INTR_SOURCE 48 /* pulse count, level */
#define I2C_EXT0_INTR_SOURCE 49 /* I2C controller1, level */
#define I2C_EXT1_INTR_SOURCE 50 /* I2C controller0, level */
#define RSA_INTR_SOURCE 51 /* RSA accelerator, level */
#define SPI1_DMA_INTR_SOURCE 52 /* SPI1 DMA, for flash r/w, do not use it */
#define SPI2_DMA_INTR_SOURCE 53 /* SPI2 DMA, level */
#define SPI3_DMA_INTR_SOURCE 54 /* interrupt of SPI3 DMA, level */
#define WDT_INTR_SOURCE 55 /* will be cancelled */
#define TIMER1_INTR_SOURCE 56 /* will be cancelled */
#define TIMER2_INTR_SOURCE 57 /* will be cancelled */
#define TG0_T0_EDGE_INTR_SOURCE 58 /* TIMER_GROUP0, TIMER0, EDGE */
#define TG0_T1_EDGE_INTR_SOURCE 59 /* TIMER_GROUP0, TIMER1, EDGE */
#define TG0_WDT_EDGE_INTR_SOURCE 60 /* TIMER_GROUP0, WATCH DOG, EDGE */
#define TG0_LACT_EDGE_INTR_SOURCE 61 /* TIMER_GROUP0, LACT, EDGE */
#define TG1_T0_EDGE_INTR_SOURCE 62 /* TIMER_GROUP1, TIMER0, EDGE */
#define TG1_T1_EDGE_INTR_SOURCE 63 /* TIMER_GROUP1, TIMER1, EDGE */
#define TG1_WDT_EDGE_INTR_SOURCE 64 /* TIMER_GROUP1, WATCHDOG, EDGE */
#define TG1_LACT_EDGE_INTR_SOURCE 65 /* TIMER_GROUP1, LACT, EDGE */
#define MMU_IA_INTR_SOURCE 66 /* MMU Invalid Access, LEVEL */
#define MPU_IA_INTR_SOURCE 67 /* MPU Invalid Access, LEVEL */
#define CACHE_IA_INTR_SOURCE 68 /* Cache Invalid Access, LEVEL */
#define MAX_INTR_SOURCE 69 /* total number of interrupt sources */
#endif

View file

@ -6,6 +6,7 @@ config SOC_ESP32
select XTENSA
select CLOCK_CONTROL
select CLOCK_CONTROL_ESP32
select DYNAMIC_INTERRUPTS
if SOC_ESP32

View file

@ -8,6 +8,7 @@
#include "soc.h"
#include <soc/rtc_cntl_reg.h>
#include <soc/timer_group_reg.h>
#include <drivers/interrupt_controller/intc_esp32.h>
#include <xtensa/config/core-isa.h>
#include <xtensa/corebits.h>
@ -121,6 +122,7 @@ void __attribute__((section(".iram1"))) __start(void)
#if CONFIG_SOC_FLASH_ESP32 || CONFIG_ESP_SPIRAM
spi_flash_guard_set(&g_flash_guard_default_ops);
#endif
esp_intr_initialize();
/* Start Zephyr */
z_cstart();