drivers: dma: stm32: refactor irq handling

- Remove the need for k_malloc()
- On hardware that supports it, use one IRQ handler per stream so the
  stream ID is known in the handler and the ISR does not have to loop
  over all IDs to find the active one. On hardware (such as STM32L0
  and STM32F0) where up to 7 streams share 3 IRQs, keep the loop to
  check which stream is active (see the sketch below).

Signed-off-by: Erwin Rol <erwin@erwinrol.com>
Author: Erwin Rol, 2020-08-21 23:50:00 +02:00
Committed by: Carles Cufí
parent c8ae070de7
commit 67f68e68f1
5 changed files with 247 additions and 103 deletions
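
For illustration, here is a minimal sketch of the two dispatch strategies described above, written with hypothetical helpers stream_irq_is_active() and handle_stream(); the driver itself uses dma_stm32_irq_handler() and, for shared lines, dma_stm32_shared_irq_handler(), as the diffs below show.

#include <stdbool.h>
#include <stdint.h>

#define MAX_STREAMS 8U

/* Hypothetical helpers standing in for the LL flag checks and for the
 * per-stream completion/error handling done by dma_stm32_irq_handler().
 */
extern bool stream_irq_is_active(uint32_t id);
extern void handle_stream(uint32_t id);

/* Dedicated-IRQ devices: one tiny ISR per stream. The stream ID is a
 * compile-time constant, so the ISR needs no scanning.
 */
static void dma_isr_stream3(const void *arg)
{
	(void)arg;
	handle_stream(3U);
}

/* Shared-IRQ devices (e.g. STM32F0/L0): one ISR serves several streams,
 * so it scans all of them and services every stream with pending flags.
 */
static void dma_isr_shared(const void *arg)
{
	(void)arg;
	for (uint32_t id = 0U; id < MAX_STREAMS; id++) {
		if (stream_irq_is_active(id)) {
			handle_stream(id);
		}
	}
}

Note that the shared variant may service more than one stream per interrupt, since several flag sets can be pending by the time the shared line is handled.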

View file

@@ -6,7 +6,7 @@
config DMA_STM32
bool "Enable STM32 DMA driver"
select DYNAMIC_INTERRUPTS
select USE_STM32_LL_DMA
depends on SOC_FAMILY_STM32
help
DMA driver for STM32 series SoCs.
@@ -16,14 +16,12 @@ if DMA_STM32
config DMA_STM32_V1
bool
depends on SOC_SERIES_STM32F2X || SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X
select USE_STM32_LL_DMA
help
Enable DMA support on F2/F4/F7 series SoCs.
config DMA_STM32_V2
bool
depends on SOC_SERIES_STM32F0X || SOC_SERIES_STM32F1X || SOC_SERIES_STM32F3X || SOC_SERIES_STM32L0X || SOC_SERIES_STM32L4X || SOC_SERIES_STM32WBX
select USE_STM32_LL_DMA
help
Enable DMA support on F0/F1/F3/L0/L4/WB series SoCs.
@@ -34,4 +32,11 @@ config DMAMUX_STM32
help
Enable DMAMUX support on L4R/WB series SoCs.
config DMA_STM32_SHARED_IRQS
bool
default y
depends on SOC_SERIES_STM32F0X || SOC_SERIES_STM32L0X
help
Enable shared IRQ support on devices where channels share 1 IRQ.
endif # DMA_STM32
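
As background for this option: when several channels sit on one interrupt line, the shared ISR has to decide which stream(s) to service, and a pending status flag alone is not a safe criterion. A minimal sketch of the "enabled AND flagged" test follows, using hypothetical accessors is_it_enabled() and is_flag_set(); the driver implements this as stm32_dma_is_irq_active() on top of the LL_DMA_IsEnabledIT_* and func_ll_is_active_* helpers added in the later files of this commit.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical register accessors standing in for the STM32 LL helpers. */
extern bool is_it_enabled(uint32_t id); /* interrupt unmasked for stream id */
extern bool is_flag_set(uint32_t id);   /* status flag pending for stream id */

static bool stream_owns_shared_irq(uint32_t id)
{
	/*
	 * On a shared line, a pending flag belonging to a stream whose
	 * interrupt is masked (for example a polled transfer) must not be
	 * serviced here, so both conditions are required.
	 */
	return is_it_enabled(id) && is_flag_set(id);
}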

View file

@@ -24,6 +24,30 @@
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_stm32, CONFIG_DMA_LOG_LEVEL);
#if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay)
#if DT_INST_IRQ_HAS_IDX(0, 7)
#define DMA_STM32_0_STREAM_COUNT 8
#elif DT_INST_IRQ_HAS_IDX(0, 6)
#define DMA_STM32_0_STREAM_COUNT 7
#elif DT_INST_IRQ_HAS_IDX(0, 5)
#define DMA_STM32_0_STREAM_COUNT 6
#else
#define DMA_STM32_0_STREAM_COUNT 5
#endif
#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */
#if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay)
#if DT_INST_IRQ_HAS_IDX(1, 7)
#define DMA_STM32_1_STREAM_COUNT 8
#elif DT_INST_IRQ_HAS_IDX(1, 6)
#define DMA_STM32_1_STREAM_COUNT 7
#elif DT_INST_IRQ_HAS_IDX(1, 5)
#define DMA_STM32_1_STREAM_COUNT 6
#else
#define DMA_STM32_1_STREAM_COUNT 5
#endif
#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) */
static uint32_t table_m_size[] = {
LL_DMA_MDATAALIGN_BYTE,
LL_DMA_MDATAALIGN_HALFWORD,
@@ -54,31 +78,16 @@ static void dma_stm32_clear_stream_irq(const struct device *dev, uint32_t id)
stm32_dma_clear_stream_irq(dma, id);
}
static void dma_stm32_irq_handler(void *arg)
static void dma_stm32_irq_handler(const struct device *dev, uint32_t id)
{
const struct device *dev = arg;
struct dma_stm32_data *data = dev->data;
const struct dma_stm32_config *config = dev->config;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_stream *stream;
int id;
uint32_t callback_arg;
for (id = 0; id < data->max_streams; id++) {
if (func_ll_is_active_tc[id](dma)) {
break;
}
if (stm32_dma_is_irq_happened(dma, id)) {
break;
}
}
__ASSERT_NO_MSG(id < config->max_streams);
if (id == data->max_streams) {
LOG_ERR("Unknown interrupt happened.");
return;
}
stream = &data->streams[id];
stream = &config->streams[id];
#ifdef CONFIG_DMAMUX_STM32
callback_arg = stream->mux_channel;
@@ -93,26 +102,42 @@ static void dma_stm32_irq_handler(void *arg)
/* the dma stream id is in range from STREAM_OFFSET..<dma-requests> */
if (func_ll_is_active_tc[id](dma)) {
func_ll_clear_tc[id](dma);
#ifdef CONFIG_DMAMUX_STM32
stream->busy = false;
#endif
stream->dma_callback(dev, stream->user_data, callback_arg, 0);
} else if (func_ll_is_active_ht[id](dma)) {
func_ll_clear_ht[id](dma);
} else if (stm32_dma_is_unexpected_irq_happened(dma, id)) {
LOG_ERR("Unexpected irq happened.");
stream->dma_callback(dev, stream->user_data,
callback_arg, -EIO);
} else {
LOG_ERR("Transfer Error.");
dma_stm32_dump_stream_irq(dev, id);
dma_stm32_clear_stream_irq(dev, id);
stream->dma_callback(dev, stream->user_data,
callback_arg, -EIO);
}
}
#ifdef CONFIG_DMA_STM32_SHARED_IRQS
static void dma_stm32_shared_irq_handler(const struct device *dev)
{
const struct dma_stm32_config *cfg = dev->config;
DMA_TypeDef *dma = (DMA_TypeDef *)(cfg->base);
uint32_t id = 0;
for (id = 0; id < cfg->max_streams; ++id) {
if (stm32_dma_is_irq_active(dma, id)) {
dma_stm32_irq_handler(dev, id);
}
}
}
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
static int dma_stm32_width_config(struct dma_config *config,
bool source_periph,
DMA_TypeDef *dma,
@@ -224,13 +249,13 @@ static int dma_stm32_get_periph_increment(enum dma_addr_adj increment,
return 0;
}
DMA_STM32_EXPORT_API int dma_stm32_configure(const struct device *dev, uint32_t id,
DMA_STM32_EXPORT_API int dma_stm32_configure(const struct device *dev,
uint32_t id,
struct dma_config *config)
{
struct dma_stm32_data *data = dev->data;
struct dma_stm32_stream *stream = &data->streams[id - STREAM_OFFSET];
const struct dma_stm32_config *dev_config =
dev->config;
const struct dma_stm32_config *dev_config = dev->config;
struct dma_stm32_stream *stream =
&dev_config->streams[id - STREAM_OFFSET];
DMA_TypeDef *dma = (DMA_TypeDef *)dev_config->base;
LL_DMA_InitTypeDef DMA_InitStruct;
uint32_t msize;
@@ -239,7 +264,7 @@ DMA_STM32_EXPORT_API int dma_stm32_configure(const struct device *dev, uint32_t
/* give channel from index 0 */
id = id - STREAM_OFFSET;
if (id >= data->max_streams) {
if (id >= dev_config->max_streams) {
LOG_ERR("cannot configure the dma stream %d.", id);
return -EINVAL;
}
@@ -462,13 +487,12 @@ DMA_STM32_EXPORT_API int dma_stm32_reload(const struct device *dev, uint32_t id,
{
const struct dma_stm32_config *config = dev->config;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_data *data = dev->data;
struct dma_stm32_stream *stream = &data->streams[id - STREAM_OFFSET];
struct dma_stm32_stream *stream = &config->streams[id - STREAM_OFFSET];
/* give channel from index 0 */
id = id - STREAM_OFFSET;
if (id >= data->max_streams) {
if (id >= config->max_streams) {
return -EINVAL;
}
@@ -505,13 +529,12 @@ DMA_STM32_EXPORT_API int dma_stm32_start(const struct device *dev, uint32_t id)
{
const struct dma_stm32_config *config = dev->config;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_data *data = dev->data;
/* give channel from index 0 */
id = id - STREAM_OFFSET;
/* Only M2P or M2M mode can be started manually. */
if (id >= data->max_streams) {
if (id >= config->max_streams) {
return -EINVAL;
}
@@ -524,16 +547,14 @@ DMA_STM32_EXPORT_API int dma_stm32_start(const struct device *dev, uint32_t id)
DMA_STM32_EXPORT_API int dma_stm32_stop(const struct device *dev, uint32_t id)
{
struct dma_stm32_data *data = dev->data;
struct dma_stm32_stream *stream = &data->streams[id - STREAM_OFFSET];
const struct dma_stm32_config *config =
dev->config;
const struct dma_stm32_config *config = dev->config;
struct dma_stm32_stream *stream = &config->streams[id - STREAM_OFFSET];
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
/* give channel from index 0 */
id = id - STREAM_OFFSET;
if (id >= data->max_streams) {
if (id >= config->max_streams) {
return -EINVAL;
}
@@ -553,11 +574,8 @@ DMA_STM32_EXPORT_API int dma_stm32_stop(const struct device *dev, uint32_t id)
return 0;
}
struct k_mem_block block;
static int dma_stm32_init(const struct device *dev)
{
struct dma_stm32_data *data = dev->data;
const struct dma_stm32_config *config = dev->config;
const struct device *clk =
device_get_binding(STM32_CLOCK_CONTROL_NAME);
@@ -570,25 +588,16 @@ static int dma_stm32_init(const struct device *dev)
config->config_irq(dev);
int size_stream =
sizeof(struct dma_stm32_stream) * data->max_streams;
data->streams = k_malloc(size_stream);
if (!data->streams) {
LOG_ERR("HEAP_MEM_POOL_SIZE is too small");
return -ENOMEM;
}
memset(data->streams, 0, size_stream);
#ifdef CONFIG_DMAMUX_STM32
int offset = ((dev == device_get_binding((const char *)"DMA_1"))
? 0 : data->max_streams);
? 0 : config->max_streams);
#endif /* CONFIG_DMAMUX_STM32 */
for (int i = 0; i < data->max_streams; i++) {
data->streams[i].busy = false;
for (uint32_t i = 0; i < config->max_streams; i++) {
config->streams[i].busy = false;
#ifdef CONFIG_DMAMUX_STM32
/* each further stream->mux_channel is fixed here */
data->streams[i].mux_channel = i + offset;
config->streams[i].mux_channel = i + offset;
#endif /* CONFIG_DMAMUX_STM32 */
}
@@ -600,16 +609,15 @@ static int dma_stm32_get_status(const struct device *dev, uint32_t id,
{
const struct dma_stm32_config *config = dev->config;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_data *data = dev->data;
struct dma_stm32_stream *stream;
/* give channel from index 0 */
id = id - STREAM_OFFSET;
if (id >= data->max_streams) {
if (id >= config->max_streams) {
return -EINVAL;
}
stream = &data->streams[id];
stream = &config->streams[id];
stat->pending_length = LL_DMA_GetDataLength(dma, table_ll_stream[id]);
stat->dir = stream->direction;
stat->busy = stream->busy;
@@ -625,8 +633,9 @@ static const struct dma_driver_api dma_funcs = {
.get_status = dma_stm32_get_status,
};
#define DMA_INIT(index) \
static void dma_stm32_config_irq_##index(const struct device *dev); \
#define DMA_STM32_INIT_DEV(index) \
static struct dma_stm32_stream \
dma_stm32_streams_##index[DMA_STM32_##index##_STREAM_COUNT]; \
\
const struct dma_stm32_config dma_stm32_config_##index = { \
.pclken = { .bus = DT_INST_CLOCKS_CELL(index, bus), \
@@ -634,84 +643,147 @@ const struct dma_stm32_config dma_stm32_config_##index = { \
.config_irq = dma_stm32_config_irq_##index, \
.base = DT_INST_REG_ADDR(index), \
.support_m2m = DT_INST_PROP(index, st_mem2mem), \
.max_streams = DMA_STM32_##index##_STREAM_COUNT, \
.streams = dma_stm32_streams_##index, \
}; \
\
static struct dma_stm32_data dma_stm32_data_##index = { \
.max_streams = 0, \
.streams = NULL, \
}; \
\
DEVICE_AND_API_INIT(dma_##index, DT_INST_LABEL(index), \
DEVICE_AND_API_INIT(dma_stm32_##index##_dev, DT_INST_LABEL(index), \
&dma_stm32_init, \
&dma_stm32_data_##index, &dma_stm32_config_##index, \
PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \
&dma_funcs)
#define irq_func(chan) \
static void dma_stm32_irq_##chan(void *arg) \
{ \
dma_stm32_irq_handler(arg, chan); \
}
#ifdef CONFIG_DMA_STM32_SHARED_IRQS
#define IRQ_INIT(dma, chan) \
#define DMA_STM32_DEFINE_IRQ_HANDLER(dma, chan) /* nothing */
#define DMA_STM32_IRQ_CONNECT(dma, chan) \
do { \
if (!irq_is_enabled(DT_INST_IRQ_BY_IDX(dma, chan, irq))) { \
irq_connect_dynamic(DT_INST_IRQ_BY_IDX(dma, chan, irq), \
IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq), \
DT_INST_IRQ_BY_IDX(dma, chan, priority), \
(void (*)(const void *)) \
dma_stm32_irq_handler, dev, 0); \
dma_stm32_shared_irq_handler, \
DEVICE_GET(dma_stm32_##dma##_dev), 0); \
irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq)); \
} \
data->max_streams++; \
} while (0)
#else /* CONFIG_DMA_STM32_SHARED_IRQS */
#define DMA_STM32_DEFINE_IRQ_HANDLER(dma, chan) \
static void dma_stm32_irq_##dma##_##chan(const struct device *dev) \
{ \
dma_stm32_irq_handler(dev, chan); \
}
#define DMA_STM32_IRQ_CONNECT(dma, chan) \
do { \
IRQ_CONNECT(DT_INST_IRQ_BY_IDX(dma, chan, irq), \
DT_INST_IRQ_BY_IDX(dma, chan, priority), \
dma_stm32_irq_##dma##_##chan, \
DEVICE_GET(dma_stm32_##dma##_dev), 0); \
irq_enable(DT_INST_IRQ_BY_IDX(dma, chan, irq)); \
} while (0)
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
#if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay)
DMA_INIT(0);
static void dma_stm32_config_irq_0(const struct device *dev)
{
struct dma_stm32_data *data = dev->data;
DEVICE_DECLARE(dma_stm32_0_dev);
IRQ_INIT(0, 0);
IRQ_INIT(0, 1);
IRQ_INIT(0, 2);
IRQ_INIT(0, 3);
IRQ_INIT(0, 4);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 0);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 1);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 2);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 3);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 4);
#if DT_INST_IRQ_HAS_IDX(0, 5)
IRQ_INIT(0, 5);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 5);
#if DT_INST_IRQ_HAS_IDX(0, 6)
IRQ_INIT(0, 6);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 6);
#if DT_INST_IRQ_HAS_IDX(0, 7)
IRQ_INIT(0, 7);
DMA_STM32_DEFINE_IRQ_HANDLER(0, 7);
#endif /* DT_INST_IRQ_HAS_IDX(0, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 5) */
static void dma_stm32_config_irq_0(const struct device *dev)
{
ARG_UNUSED(dev);
DMA_STM32_IRQ_CONNECT(0, 0);
DMA_STM32_IRQ_CONNECT(0, 1);
#ifndef CONFIG_DMA_STM32_SHARED_IRQS
DMA_STM32_IRQ_CONNECT(0, 2);
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
DMA_STM32_IRQ_CONNECT(0, 3);
#ifndef CONFIG_DMA_STM32_SHARED_IRQS
DMA_STM32_IRQ_CONNECT(0, 4);
#if DT_INST_IRQ_HAS_IDX(0, 5)
DMA_STM32_IRQ_CONNECT(0, 5);
#if DT_INST_IRQ_HAS_IDX(0, 6)
DMA_STM32_IRQ_CONNECT(0, 6);
#if DT_INST_IRQ_HAS_IDX(0, 7)
DMA_STM32_IRQ_CONNECT(0, 7);
#endif /* DT_INST_IRQ_HAS_IDX(0, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(0, 5) */
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
/* Either 5 or 6 or 7 or 8 channels for DMA across all stm32 series. */
}
DMA_STM32_INIT_DEV(0);
#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */
#if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay)
DMA_INIT(1);
static void dma_stm32_config_irq_1(const struct device *dev)
{
struct dma_stm32_data *data = dev->data;
DEVICE_DECLARE(dma_stm32_1_dev);
IRQ_INIT(1, 0);
IRQ_INIT(1, 1);
IRQ_INIT(1, 2);
IRQ_INIT(1, 3);
IRQ_INIT(1, 4);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 0);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 1);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 2);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 3);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 4);
#if DT_INST_IRQ_HAS_IDX(1, 5)
IRQ_INIT(1, 5);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 5);
#if DT_INST_IRQ_HAS_IDX(1, 6)
IRQ_INIT(1, 6);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 6);
#if DT_INST_IRQ_HAS_IDX(1, 7)
IRQ_INIT(1, 7);
DMA_STM32_DEFINE_IRQ_HANDLER(1, 7);
#endif /* DT_INST_IRQ_HAS_IDX(1, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 5) */
static void dma_stm32_config_irq_1(const struct device *dev)
{
ARG_UNUSED(dev);
DMA_STM32_IRQ_CONNECT(1, 0);
DMA_STM32_IRQ_CONNECT(1, 1);
#ifndef CONFIG_DMA_STM32_SHARED_IRQS
DMA_STM32_IRQ_CONNECT(1, 2);
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
DMA_STM32_IRQ_CONNECT(1, 3);
#ifndef CONFIG_DMA_STM32_SHARED_IRQS
DMA_STM32_IRQ_CONNECT(1, 4);
#if DT_INST_IRQ_HAS_IDX(1, 5)
DMA_STM32_IRQ_CONNECT(1, 5);
#if DT_INST_IRQ_HAS_IDX(1, 6)
DMA_STM32_IRQ_CONNECT(1, 6);
#if DT_INST_IRQ_HAS_IDX(1, 7)
DMA_STM32_IRQ_CONNECT(1, 7);
#endif /* DT_INST_IRQ_HAS_IDX(1, 7) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 6) */
#endif /* DT_INST_IRQ_HAS_IDX(1, 5) */
#endif /* CONFIG_DMA_STM32_SHARED_IRQS */
/* Either 5 or 6 or 7 or 8 channels for DMA across all stm32 series. */
}
DMA_STM32_INIT_DEV(1);
#endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) */

View file

@@ -24,8 +24,6 @@ struct dma_stm32_stream {
};
struct dma_stm32_data {
int max_streams;
struct dma_stm32_stream *streams;
};
struct dma_stm32_config {
@@ -33,6 +31,8 @@ struct dma_stm32_config {
void (*config_irq)(const struct device *dev);
bool support_m2m;
uint32_t base;
uint32_t max_streams;
struct dma_stm32_stream *streams;
};
#ifdef CONFIG_DMA_STM32_V1
@@ -57,6 +57,8 @@ extern void (*func_ll_clear_gi[])(DMA_TypeDef *DMAx);
extern uint32_t table_ll_channel[];
#endif
bool stm32_dma_is_irq_active(DMA_TypeDef *dma, uint32_t id);
void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id);
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id);
bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, uint32_t id);

View file

@@ -160,6 +160,45 @@ void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id)
func_ll_is_active_fe[id](dma));
}
static inline bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_TC(dma, id) &&
func_ll_is_active_tc[id](dma);
}
static inline bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_HT(dma, id) &&
func_ll_is_active_ht[id](dma);
}
static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_TE(dma, id) &&
func_ll_is_active_te[id](dma);
}
static inline bool stm32_dma_is_dme_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_DME(dma, id) &&
func_ll_is_active_dme[id](dma);
}
static inline bool stm32_dma_is_fe_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_FE(dma, id) &&
func_ll_is_active_fe[id](dma);
}
bool stm32_dma_is_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return stm32_dma_is_tc_irq_active(dma, id) ||
stm32_dma_is_ht_irq_active(dma, id) ||
stm32_dma_is_te_irq_active(dma, id) ||
stm32_dma_is_dme_irq_active(dma, id) ||
stm32_dma_is_fe_irq_active(dma, id);
}
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id)
{
func_ll_clear_te[id](dma);

View file

@@ -179,6 +179,32 @@ void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, uint32_t id)
func_ll_is_active_gi[id](dma));
}
static inline bool stm32_dma_is_tc_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_TC(dma, id) &&
func_ll_is_active_tc[id](dma);
}
static inline bool stm32_dma_is_ht_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_HT(dma, id) &&
func_ll_is_active_ht[id](dma);
}
static inline bool stm32_dma_is_te_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return LL_DMA_IsEnabledIT_TE(dma, id) &&
func_ll_is_active_te[id](dma);
}
bool stm32_dma_is_irq_active(DMA_TypeDef *dma, uint32_t id)
{
return stm32_dma_is_tc_irq_active(dma, id) ||
stm32_dma_is_ht_irq_active(dma, id) ||
stm32_dma_is_te_irq_active(dma, id) ||
func_ll_is_active_gi[id](dma);
}
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, uint32_t id)
{
func_ll_clear_te[id](dma);