drivers: dma: add generic driver support for some series of stm32

This commit adds driver support for DMA on the F0/F1/F2/F3/F4/L0/L4
series of STM32 SoCs.

Note that, due to some remaining bugs, this driver does not currently
work on the F7 series.

Two kinds of DMA IP blocks are used across these STM32 SoCs: one is
used on the F2/F4/F7 series, and the other on the F0/F1/F3/L0/L4
series.

Memory-to-memory transfer is only supported by the second DMA
controller on F2/F4, and requires 'st,mem2mem' to be declared in the
DTS.

This driver depends on k_malloc to allocate memory for its stream
instances, so CONFIG_HEAP_MEM_POOL_SIZE must be large enough to hold
them.
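
For reference, a minimal consumer sketch (not part of this commit) of
driving a memory-to-memory copy through the generic Zephyr DMA API once
this driver is enabled; the "DMA_2" binding name, stream number 0 and
the buffer sizes are illustrative assumptions, and 'st,mem2mem' is
assumed to be declared for that controller in the DTS:

#include <zephyr.h>
#include <device.h>
#include <drivers/dma.h>

static u8_t src_buf[64];
static u8_t dst_buf[64];
static struct k_sem xfer_done;

/* Called from the DMA ISR with 0 on completion, -EIO on error. */
static void dma_done_cb(void *arg, u32_t id, int error_code)
{
    if (error_code == 0) {
        k_sem_give(&xfer_done);
    }
}

void mem2mem_copy_example(void)
{
    struct dma_block_config blk = {
        .source_address = (u32_t)src_buf,
        .dest_address = (u32_t)dst_buf,
        .block_size = sizeof(src_buf),
    };
    struct dma_config cfg = {
        .channel_direction = MEMORY_TO_MEMORY,
        .source_data_size = 1,
        .dest_data_size = 1,
        .source_burst_length = 1,
        .dest_burst_length = 1,
        .block_count = 1,
        .head_block = &blk,
        .dma_callback = dma_done_cb,
    };
    /* "DMA_2" is the assumed devicetree label of the controller
     * that has 'st,mem2mem' set.
     */
    struct device *dma = device_get_binding("DMA_2");

    k_sem_init(&xfer_done, 0, 1);
    if (dma == NULL || dma_config(dma, 0, &cfg) != 0) {
        return;
    }
    dma_start(dma, 0);
    k_sem_take(&xfer_done, K_FOREVER);
}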

Common parts of the driver are in dma_stm32.c, and the SoC-specific
parts are implemented in dma_stm32_v*.c.

This driver has been tested on multiple Nucleo boards, including
NUCLEO_F091RC/F103RB/F207ZG/F302R8/F401RE/L073RZ/L476RG, with the
loop_transfer and chan_blen_transfer test cases.

Signed-off-by: Song Qiang <songqiang1304521@gmail.com>
Song Qiang 2019-10-24 19:06:19 +08:00 committed by Kumar Gala
parent 87b9da6141
commit 749d2d21bf
27 changed files with 1307 additions and 698 deletions


@ -119,6 +119,7 @@
/drivers/display/ @vanwinkeljan
/drivers/display/display_framebuf.c @andrewboie
/drivers/dma/*sam0* @Sizurka
/drivers/dma/dma_stm32* @cybertale
/drivers/espi/ @albertofloyd @franciscomunoz @scottwcpg
/drivers/ps2/ @albertofloyd @franciscomunoz @scottwcpg
/drivers/kscan/ @albertofloyd @franciscomunoz @scottwcpg


@ -3,7 +3,9 @@
zephyr_library()
zephyr_library_sources_ifdef(CONFIG_DMA_SAM_XDMAC dma_sam_xdmac.c)
zephyr_library_sources_ifdef(CONFIG_DMA_STM32F4X dma_stm32f4x.c)
zephyr_library_sources_ifdef(CONFIG_DMA_STM32 dma_stm32.c)
zephyr_library_sources_ifdef(CONFIG_DMA_STM32_V1 dma_stm32_v1.c)
zephyr_library_sources_ifdef(CONFIG_DMA_STM32_V2 dma_stm32_v2.c)
zephyr_library_sources_ifdef(CONFIG_DMA_CAVS dma_cavs.c)
zephyr_library_sources_ifdef(CONFIG_DMA_NIOS2_MSGDMA dma_nios2_msgdma.c)
zephyr_library_sources_ifdef(CONFIG_DMA_SAM0 dma_sam0.c)


@ -38,7 +38,7 @@ module = DMA
module-str = dma
source "subsys/logging/Kconfig.template.log_config"
source "drivers/dma/Kconfig.stm32f4x"
source "drivers/dma/Kconfig.stm32"
source "drivers/dma/Kconfig.sam_xdmac"

34 drivers/dma/Kconfig.stm32 Normal file

@ -0,0 +1,34 @@
# Kconfig - DMA configuration options
#
#
# Copyright (c) 2016 Intel Corporation
# Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0
#
config DMA_STM32
bool "Enable STM32 DMA driver"
select DYNAMIC_INTERRUPTS
depends on SOC_FAMILY_STM32
depends on HEAP_MEM_POOL_SIZE != 0
help
DMA driver for STM32 series SoCs.
if DMA_STM32
config DMA_STM32_V1
bool
depends on SOC_SERIES_STM32F2X || SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X
select USE_STM32_LL_DMA
help
Enable DMA support on F2/F4/F7 series SoCs.
config DMA_STM32_V2
bool
depends on SOC_SERIES_STM32F0X || SOC_SERIES_STM32F1X || SOC_SERIES_STM32F3X || SOC_SERIES_STM32L0X || SOC_SERIES_STM32L4X
select USE_STM32_LL_DMA
help
Enable DMA support on F0/F1/F3/L0/L4 series SoCs.
endif # DMA_STM32


@ -1,10 +0,0 @@
# DMA configuration options
# Copyright (c) 2016 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
config DMA_STM32F4X
bool "Enable STM32F4x DMA driver"
depends on SOC_SERIES_STM32F4X
help
DMA driver for STM32F4x series SoCs.

635 drivers/dma/dma_stm32.c Normal file

@ -0,0 +1,635 @@
/*
* Copyright (c) 2016 Linaro Limited.
* Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief Common part of DMA drivers for stm32.
* @note Functions named stm32_dma_* are SoC-specific functions
* implemented in dma_stm32_v*.c
*/
#include "dma_stm32.h"
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_stm32);
#include <clock_control/stm32_clock_control.h>
static u32_t table_m_size[] = {
LL_DMA_MDATAALIGN_BYTE,
LL_DMA_MDATAALIGN_HALFWORD,
LL_DMA_MDATAALIGN_WORD,
};
static u32_t table_p_size[] = {
LL_DMA_PDATAALIGN_BYTE,
LL_DMA_PDATAALIGN_HALFWORD,
LL_DMA_PDATAALIGN_WORD,
};
struct dma_stm32_stream {
u32_t direction;
bool source_periph;
bool busy;
u32_t src_size;
u32_t dst_size;
void *callback_arg;
void (*dma_callback)(void *arg, u32_t id,
int error_code);
};
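/* The callback stored above is invoked from the DMA ISR: error_code is
 * 0 on transfer complete and -EIO on transfer or unexpected errors
 * (see dma_stm32_irq_handler() below).
 */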
struct dma_stm32_data {
int max_streams;
struct dma_stm32_stream *streams;
};
struct dma_stm32_config {
struct stm32_pclken pclken;
void (*config_irq)(struct device *dev);
bool support_m2m;
u32_t base;
};
/* Maximum data sent in single transfer (Bytes) */
#define DMA_STM32_MAX_DATA_ITEMS 0xffff
static void dma_stm32_dump_stream_irq(struct device *dev, u32_t id)
{
const struct dma_stm32_config *config = dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
stm32_dma_dump_stream_irq(dma, id);
}
static void dma_stm32_clear_stream_irq(struct device *dev, u32_t id)
{
const struct dma_stm32_config *config = dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
func_ll_clear_tc[id](dma);
func_ll_clear_ht[id](dma);
stm32_dma_clear_stream_irq(dma, id);
}
static void dma_stm32_irq_handler(void *arg)
{
struct device *dev = arg;
struct dma_stm32_data *data = dev->driver_data;
const struct dma_stm32_config *config = dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_stream *stream;
int id;
for (id = 0; id < data->max_streams; id++) {
if (func_ll_is_active_tc[id](dma)) {
break;
}
if (stm32_dma_is_irq_happened(dma, id)) {
break;
}
}
if (id == data->max_streams) {
LOG_ERR("Unknown interrupt happened.");
return;
}
stream = &data->streams[id];
stream->busy = false;
if (func_ll_is_active_tc[id](dma)) {
func_ll_clear_tc[id](dma);
stream->dma_callback(stream->callback_arg, id, 0);
} else if (stm32_dma_is_unexpected_irq_happened(dma, id)) {
LOG_ERR("Unexpected irq happened.");
stream->dma_callback(stream->callback_arg, id, -EIO);
} else {
LOG_ERR("Transfer Error.");
dma_stm32_dump_stream_irq(dev, id);
dma_stm32_clear_stream_irq(dev, id);
stream->dma_callback(stream->callback_arg, id, -EIO);
}
}
static u32_t dma_stm32_width_config(struct dma_config *config,
bool source_periph,
DMA_TypeDef *dma,
LL_DMA_InitTypeDef *DMA_InitStruct,
u32_t id)
{
u32_t periph, memory;
u32_t m_size = 0, p_size = 0;
if (source_periph) {
periph = config->source_data_size;
memory = config->dest_data_size;
} else {
periph = config->dest_data_size;
memory = config->source_data_size;
}
int index = find_lsb_set(memory) - 1;
m_size = table_m_size[index];
index = find_lsb_set(periph) - 1;
p_size = table_p_size[index];
DMA_InitStruct->PeriphOrM2MSrcDataSize = p_size;
DMA_InitStruct->MemoryOrM2MDstDataSize = m_size;
return 0;
}
static int dma_stm32_get_priority(u8_t priority, u32_t *ll_priority)
{
switch (priority) {
case 0x0:
*ll_priority = LL_DMA_PRIORITY_LOW;
break;
case 0x1:
*ll_priority = LL_DMA_PRIORITY_MEDIUM;
break;
case 0x2:
*ll_priority = LL_DMA_PRIORITY_HIGH;
break;
case 0x3:
*ll_priority = LL_DMA_PRIORITY_VERYHIGH;
break;
default:
LOG_ERR("Priority error. %d", priority);
return -EINVAL;
}
return 0;
}
static int dma_stm32_get_direction(enum dma_channel_direction direction,
u32_t *ll_direction)
{
switch (direction) {
case MEMORY_TO_MEMORY:
*ll_direction = LL_DMA_DIRECTION_MEMORY_TO_MEMORY;
break;
case MEMORY_TO_PERIPHERAL:
*ll_direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH;
break;
case PERIPHERAL_TO_MEMORY:
*ll_direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY;
break;
default:
LOG_ERR("Direction error. %d", direction);
return -EINVAL;
}
return 0;
}
static int dma_stm32_get_memory_increment(enum dma_addr_adj increment,
u32_t *ll_increment)
{
switch (increment) {
case DMA_ADDR_ADJ_INCREMENT:
*ll_increment = LL_DMA_MEMORY_INCREMENT;
break;
case DMA_ADDR_ADJ_NO_CHANGE:
*ll_increment = LL_DMA_MEMORY_NOINCREMENT;
break;
case DMA_ADDR_ADJ_DECREMENT:
return -ENOTSUP;
default:
LOG_ERR("Memory increment error. %d", increment);
return -EINVAL;
}
return 0;
}
static int dma_stm32_get_periph_increment(enum dma_addr_adj increment,
u32_t *ll_increment)
{
switch (increment) {
case DMA_ADDR_ADJ_INCREMENT:
*ll_increment = LL_DMA_PERIPH_INCREMENT;
break;
case DMA_ADDR_ADJ_NO_CHANGE:
*ll_increment = LL_DMA_PERIPH_NOINCREMENT;
break;
case DMA_ADDR_ADJ_DECREMENT:
return -ENOTSUP;
default:
LOG_ERR("Periph increment error. %d", increment);
return -EINVAL;
}
return 0;
}
static int dma_stm32_configure(struct device *dev, u32_t id,
struct dma_config *config)
{
struct dma_stm32_data *data = dev->driver_data;
struct dma_stm32_stream *stream = &data->streams[id];
const struct dma_stm32_config *dev_config =
dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)dev_config->base;
LL_DMA_InitTypeDef DMA_InitStruct;
u32_t msize;
int ret;
if (id >= data->max_streams) {
return -EINVAL;
}
if (stream->busy) {
return -EBUSY;
}
stm32_dma_disable_stream(dma, id);
dma_stm32_clear_stream_irq(dev, id);
if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
LOG_ERR("Data size too big: %d\n",
config->head_block->block_size);
return -EINVAL;
}
if ((config->channel_direction == MEMORY_TO_MEMORY) &&
(!dev_config->support_m2m)) {
LOG_ERR("Memcopy not supported for device %s",
dev->config->name);
return -ENOTSUP;
}
if (config->source_data_size != 4U &&
config->source_data_size != 2U &&
config->source_data_size != 1U) {
LOG_ERR("Source unit size error, %d",
config->source_data_size);
return -EINVAL;
}
if (config->dest_data_size != 4U &&
config->dest_data_size != 2U &&
config->dest_data_size != 1U) {
LOG_ERR("Dest unit size error, %d",
config->dest_data_size);
return -EINVAL;
}
/*
* STM32's circular mode will auto reset both source address
* counter and destination address counter.
*/
if (config->head_block->source_reload_en !=
config->head_block->dest_reload_en) {
LOG_ERR("source_reload_en and dest_reload_en must "
"be the same.");
return -EINVAL;
}
stream->busy = true;
stream->dma_callback = config->dma_callback;
stream->direction = config->channel_direction;
stream->callback_arg = config->callback_arg;
stream->src_size = config->source_data_size;
stream->dst_size = config->dest_data_size;
if (stream->direction == MEMORY_TO_PERIPHERAL) {
DMA_InitStruct.MemoryOrM2MDstAddress =
config->head_block->source_address;
DMA_InitStruct.PeriphOrM2MSrcAddress =
config->head_block->dest_address;
} else {
DMA_InitStruct.PeriphOrM2MSrcAddress =
config->head_block->source_address;
DMA_InitStruct.MemoryOrM2MDstAddress =
config->head_block->dest_address;
}
u16_t memory_addr_adj, periph_addr_adj;
ret = dma_stm32_get_priority(config->channel_priority,
&DMA_InitStruct.Priority);
if (ret < 0) {
return ret;
}
ret = dma_stm32_get_direction(config->channel_direction,
&DMA_InitStruct.Direction);
if (ret < 0) {
return ret;
}
switch (config->channel_direction) {
case MEMORY_TO_MEMORY:
case PERIPHERAL_TO_MEMORY:
memory_addr_adj = config->head_block->dest_addr_adj;
periph_addr_adj = config->head_block->source_addr_adj;
break;
case MEMORY_TO_PERIPHERAL:
memory_addr_adj = config->head_block->source_addr_adj;
periph_addr_adj = config->head_block->dest_addr_adj;
break;
/* Direction has been asserted in dma_stm32_get_direction. */
}
ret = dma_stm32_get_memory_increment(memory_addr_adj,
&DMA_InitStruct.MemoryOrM2MDstIncMode);
if (ret < 0) {
return ret;
}
ret = dma_stm32_get_periph_increment(periph_addr_adj,
&DMA_InitStruct.PeriphOrM2MSrcIncMode);
if (ret < 0) {
return ret;
}
if (config->head_block->source_reload_en) {
DMA_InitStruct.Mode = LL_DMA_MODE_CIRCULAR;
} else {
DMA_InitStruct.Mode = LL_DMA_MODE_NORMAL;
}
stream->source_periph = stream->direction == MEMORY_TO_PERIPHERAL;
ret = dma_stm32_width_config(config, stream->source_periph, dma,
&DMA_InitStruct, id);
if (ret < 0) {
return ret;
}
msize = DMA_InitStruct.MemoryOrM2MDstDataSize;
#if defined(CONFIG_DMA_STM32_V1)
DMA_InitStruct.MemBurst = stm32_dma_get_mburst(config,
stream->source_periph);
DMA_InitStruct.PeriphBurst = stm32_dma_get_pburst(config,
stream->source_periph);
if (config->channel_direction != MEMORY_TO_MEMORY) {
if (config->dma_slot >= 8) {
LOG_ERR("dma slot error.");
return -EINVAL;
}
} else {
if (config->dma_slot >= 8) {
LOG_ERR("dma slot is too big, using 0 as default.");
config->dma_slot = 0;
}
}
DMA_InitStruct.Channel = table_ll_channel[config->dma_slot];
stm32_dma_get_fifo_threshold(config->head_block->fifo_mode_control);
if (stm32_dma_check_fifo_mburst(&DMA_InitStruct)) {
DMA_InitStruct.FIFOMode = LL_DMA_FIFOMODE_ENABLE;
} else {
DMA_InitStruct.FIFOMode = LL_DMA_FIFOMODE_DISABLE;
}
#endif
if (stream->source_periph) {
DMA_InitStruct.NbData = config->head_block->block_size /
config->source_data_size;
} else {
DMA_InitStruct.NbData = config->head_block->block_size /
config->dest_data_size;
}
LL_DMA_Init(dma, table_ll_stream[id], &DMA_InitStruct);
LL_DMA_EnableIT_TC(dma, table_ll_stream[id]);
#if defined(CONFIG_DMA_STM32_V1)
if (DMA_InitStruct.FIFOMode == LL_DMA_FIFOMODE_ENABLE) {
LL_DMA_EnableIT_FE(dma, table_ll_stream[id]);
} else {
LL_DMA_DisableIT_FE(dma, table_ll_stream[id]);
}
#endif
return ret;
}
int dma_stm32_disable_stream(DMA_TypeDef *dma, u32_t id)
{
int count = 0;
for (;;) {
if (!stm32_dma_disable_stream(dma, id)) {
	return 0;
}
/* After trying for 5 seconds, give up */
if (count++ > (5 * 1000)) {
return -EBUSY;
}
k_sleep(K_MSEC(1));
}
return 0;
}
static int dma_stm32_reload(struct device *dev, u32_t id,
u32_t src, u32_t dst, size_t size)
{
const struct dma_stm32_config *config = dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_data *data = dev->driver_data;
struct dma_stm32_stream *stream = &data->streams[id];
if (id >= data->max_streams) {
return -EINVAL;
}
switch (stream->direction) {
case MEMORY_TO_PERIPHERAL:
LL_DMA_SetMemoryAddress(dma, table_ll_stream[id], src);
LL_DMA_SetPeriphAddress(dma, table_ll_stream[id], dst);
break;
case MEMORY_TO_MEMORY:
case PERIPHERAL_TO_MEMORY:
LL_DMA_SetPeriphAddress(dma, table_ll_stream[id], src);
LL_DMA_SetMemoryAddress(dma, table_ll_stream[id], dst);
break;
default:
return -EINVAL;
}
if (stream->source_periph) {
LL_DMA_SetDataLength(dma, table_ll_stream[id],
size / stream->src_size);
} else {
LL_DMA_SetDataLength(dma, table_ll_stream[id],
size / stream->dst_size);
}
return 0;
}
static int dma_stm32_start(struct device *dev, u32_t id)
{
const struct dma_stm32_config *config = dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
struct dma_stm32_data *data = dev->driver_data;
/* Only M2P or M2M mode can be started manually. */
if (id >= data->max_streams) {
return -EINVAL;
}
dma_stm32_clear_stream_irq(dev, id);
stm32_dma_enable_stream(dma, id);
return 0;
}
static int dma_stm32_stop(struct device *dev, u32_t id)
{
struct dma_stm32_data *data = dev->driver_data;
struct dma_stm32_stream *stream = &data->streams[id];
const struct dma_stm32_config *config =
dev->config->config_info;
DMA_TypeDef *dma = (DMA_TypeDef *)(config->base);
if (id >= data->max_streams) {
return -EINVAL;
}
LL_DMA_DisableIT_TC(dma, table_ll_stream[id]);
#if defined(CONFIG_DMA_STM32_V1)
stm32_dma_disable_fifo_irq(dma, id);
#endif
dma_stm32_disable_stream(dma, id);
dma_stm32_clear_stream_irq(dev, id);
/* Finally, flag stream as free */
stream->busy = false;
return 0;
}
struct k_mem_block block;
static int dma_stm32_init(struct device *dev)
{
struct dma_stm32_data *data = dev->driver_data;
const struct dma_stm32_config *config = dev->config->config_info;
struct device *clk =
device_get_binding(STM32_CLOCK_CONTROL_NAME);
if (clock_control_on(clk,
(clock_control_subsys_t *) &config->pclken) != 0) {
LOG_ERR("clock op failed\n");
return -EIO;
}
config->config_irq(dev);
int size_stream =
sizeof(struct dma_stm32_stream) * data->max_streams;
data->streams = k_malloc(size_stream);
if (!data->streams) {
LOG_ERR("HEAP_MEM_POOL_SIZE is too small");
return -ENOMEM;
}
memset(data->streams, 0, size_stream);
for (int i = 0; i < data->max_streams; i++) {
data->streams[i].busy = false;
}
return 0;
}
static const struct dma_driver_api dma_funcs = {
.reload = dma_stm32_reload,
.config = dma_stm32_configure,
.start = dma_stm32_start,
.stop = dma_stm32_stop,
};
#define DMA_INIT(index) \
static void dma_stm32_config_irq_##index(struct device *dev); \
\
const struct dma_stm32_config dma_stm32_config_##index = { \
.pclken = { .bus = DT_INST_##index##_ST_STM32_DMA_CLOCK_BUS, \
.enr = DT_INST_##index##_ST_STM32_DMA_CLOCK_BITS }, \
.config_irq = dma_stm32_config_irq_##index, \
.base = DT_INST_##index##_ST_STM32_DMA_BASE_ADDRESS, \
.support_m2m = DT_INST_##index##_ST_STM32_DMA_ST_MEM2MEM, \
}; \
\
static struct dma_stm32_data dma_stm32_data_##index = { \
.max_streams = 0, \
.streams = NULL, \
}; \
\
DEVICE_AND_API_INIT(dma_##index, DT_INST_##index##_ST_STM32_DMA_LABEL, \
&dma_stm32_init, \
&dma_stm32_data_##index, &dma_stm32_config_##index, \
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \
&dma_funcs)
#define irq_func(chan) \
static void dma_stm32_irq_##chan(void *arg) \
{ \
dma_stm32_irq_handler(arg, chan); \
}
#define IRQ_INIT(dma, chan) \
do { \
if (!irq_is_enabled(DT_INST_##dma##_ST_STM32_DMA_IRQ_##chan)) { \
irq_connect_dynamic(DT_INST_##dma##_ST_STM32_DMA_IRQ_##chan,\
DT_INST_##dma##_ST_STM32_DMA_IRQ_##chan##_PRIORITY,\
dma_stm32_irq_handler, dev, 0); \
irq_enable(DT_INST_##dma##_ST_STM32_DMA_IRQ_##chan); \
} \
data->max_streams++; \
} while (0)
#ifdef DT_INST_0_ST_STM32_DMA
DMA_INIT(0);
static void dma_stm32_config_irq_0(struct device *dev)
{
struct dma_stm32_data *data = dev->driver_data;
IRQ_INIT(0, 0);
IRQ_INIT(0, 1);
IRQ_INIT(0, 2);
IRQ_INIT(0, 3);
IRQ_INIT(0, 4);
#ifdef DT_INST_0_ST_STM32_DMA_IRQ_5
IRQ_INIT(0, 5);
IRQ_INIT(0, 6);
#ifdef DT_INST_0_ST_STM32_DMA_IRQ_7
IRQ_INIT(0, 7);
#endif
#endif
/* Either 5 or 7 or 8 channels for DMA1 across all stm32 series. */
}
#endif
#ifdef DT_INST_1_ST_STM32_DMA
DMA_INIT(1);
static void dma_stm32_config_irq_1(struct device *dev)
{
struct dma_stm32_data *data = dev->driver_data;
#ifdef DT_INST_1_ST_STM32_DMA_IRQ_0
IRQ_INIT(1, 0);
IRQ_INIT(1, 1);
IRQ_INIT(1, 2);
IRQ_INIT(1, 3);
IRQ_INIT(1, 4);
#ifdef DT_INST_1_ST_STM32_DMA_IRQ_5
IRQ_INIT(1, 5);
IRQ_INIT(1, 6);
#ifdef DT_INST_1_ST_STM32_DMA_IRQ_7
IRQ_INIT(1, 7);
#endif
#endif
#endif
/* Either 0 or 5 or 7 or 8 channels for DMA2 across all stm32 series. */
}
#endif

33 drivers/dma/dma_stm32.h Normal file

@ -0,0 +1,33 @@
/*
* Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <drivers/dma.h>
#include <soc.h>
extern u32_t table_ll_stream[];
extern u32_t (*func_ll_is_active_tc[])(DMA_TypeDef *DMAx);
extern void (*func_ll_clear_tc[])(DMA_TypeDef *DMAx);
extern u32_t (*func_ll_is_active_ht[])(DMA_TypeDef *DMAx);
extern void (*func_ll_clear_ht[])(DMA_TypeDef *DMAx);
#ifdef CONFIG_DMA_STM32_V1
extern u32_t table_ll_channel[];
#endif
void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, u32_t id);
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, u32_t id);
bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, u32_t id);
bool stm32_dma_is_unexpected_irq_happened(DMA_TypeDef *dma, u32_t id);
void stm32_dma_enable_stream(DMA_TypeDef *dma, u32_t id);
int stm32_dma_disable_stream(DMA_TypeDef *dma, u32_t id);
void stm32_dma_enable_fifo(DMA_TypeDef *dma, u32_t id);
void stm32_dma_config_channel_function(DMA_TypeDef *dma, u32_t id, u32_t slot);
#ifdef CONFIG_DMA_STM32_V1
void stm32_dma_disable_fifo_irq(DMA_TypeDef *dma, u32_t id);
bool stm32_dma_check_fifo_mburst(LL_DMA_InitTypeDef *DMAx);
u32_t stm32_dma_get_fifo_threshold(u16_t fifo_mode_control);
u32_t stm32_dma_get_mburst(struct dma_config *config, bool source_periph);
u32_t stm32_dma_get_pburst(struct dma_config *config, bool source_periph);
#endif

357 drivers/dma/dma_stm32_v1.c Normal file

@ -0,0 +1,357 @@
/*
* Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief DMA low level driver implementation for F2/F4/F7 series SoCs.
*/
#include <drivers/dma.h>
#include <soc.h>
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_stm32_v1);
/* DMA burst length */
#define BURST_TRANS_LENGTH_1 0
u32_t table_ll_stream[] = {
LL_DMA_STREAM_0,
LL_DMA_STREAM_1,
LL_DMA_STREAM_2,
LL_DMA_STREAM_3,
LL_DMA_STREAM_4,
LL_DMA_STREAM_5,
LL_DMA_STREAM_6,
LL_DMA_STREAM_7,
};
u32_t table_ll_channel[] = {
LL_DMA_CHANNEL_0,
LL_DMA_CHANNEL_1,
LL_DMA_CHANNEL_2,
LL_DMA_CHANNEL_3,
LL_DMA_CHANNEL_4,
LL_DMA_CHANNEL_5,
LL_DMA_CHANNEL_6,
LL_DMA_CHANNEL_7,
};
void (*func_ll_clear_ht[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_HT0,
LL_DMA_ClearFlag_HT1,
LL_DMA_ClearFlag_HT2,
LL_DMA_ClearFlag_HT3,
LL_DMA_ClearFlag_HT4,
LL_DMA_ClearFlag_HT5,
LL_DMA_ClearFlag_HT6,
LL_DMA_ClearFlag_HT7,
};
void (*func_ll_clear_tc[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_TC0,
LL_DMA_ClearFlag_TC1,
LL_DMA_ClearFlag_TC2,
LL_DMA_ClearFlag_TC3,
LL_DMA_ClearFlag_TC4,
LL_DMA_ClearFlag_TC5,
LL_DMA_ClearFlag_TC6,
LL_DMA_ClearFlag_TC7,
};
u32_t (*func_ll_is_active_ht[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_HT0,
LL_DMA_IsActiveFlag_HT1,
LL_DMA_IsActiveFlag_HT2,
LL_DMA_IsActiveFlag_HT3,
LL_DMA_IsActiveFlag_HT4,
LL_DMA_IsActiveFlag_HT5,
LL_DMA_IsActiveFlag_HT6,
LL_DMA_IsActiveFlag_HT7,
};
u32_t (*func_ll_is_active_tc[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_TC0,
LL_DMA_IsActiveFlag_TC1,
LL_DMA_IsActiveFlag_TC2,
LL_DMA_IsActiveFlag_TC3,
LL_DMA_IsActiveFlag_TC4,
LL_DMA_IsActiveFlag_TC5,
LL_DMA_IsActiveFlag_TC6,
LL_DMA_IsActiveFlag_TC7,
};
static void (*func_ll_clear_te[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_TE0,
LL_DMA_ClearFlag_TE1,
LL_DMA_ClearFlag_TE2,
LL_DMA_ClearFlag_TE3,
LL_DMA_ClearFlag_TE4,
LL_DMA_ClearFlag_TE5,
LL_DMA_ClearFlag_TE6,
LL_DMA_ClearFlag_TE7,
};
static void (*func_ll_clear_dme[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_DME0,
LL_DMA_ClearFlag_DME1,
LL_DMA_ClearFlag_DME2,
LL_DMA_ClearFlag_DME3,
LL_DMA_ClearFlag_DME4,
LL_DMA_ClearFlag_DME5,
LL_DMA_ClearFlag_DME6,
LL_DMA_ClearFlag_DME7,
};
static void (*func_ll_clear_fe[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_FE0,
LL_DMA_ClearFlag_FE1,
LL_DMA_ClearFlag_FE2,
LL_DMA_ClearFlag_FE3,
LL_DMA_ClearFlag_FE4,
LL_DMA_ClearFlag_FE5,
LL_DMA_ClearFlag_FE6,
LL_DMA_ClearFlag_FE7,
};
static u32_t (*func_ll_is_active_te[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_TE0,
LL_DMA_IsActiveFlag_TE1,
LL_DMA_IsActiveFlag_TE2,
LL_DMA_IsActiveFlag_TE3,
LL_DMA_IsActiveFlag_TE4,
LL_DMA_IsActiveFlag_TE5,
LL_DMA_IsActiveFlag_TE6,
LL_DMA_IsActiveFlag_TE7,
};
static u32_t (*func_ll_is_active_dme[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_DME0,
LL_DMA_IsActiveFlag_DME1,
LL_DMA_IsActiveFlag_DME2,
LL_DMA_IsActiveFlag_DME3,
LL_DMA_IsActiveFlag_DME4,
LL_DMA_IsActiveFlag_DME5,
LL_DMA_IsActiveFlag_DME6,
LL_DMA_IsActiveFlag_DME7,
};
static u32_t (*func_ll_is_active_fe[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_FE0,
LL_DMA_IsActiveFlag_FE1,
LL_DMA_IsActiveFlag_FE2,
LL_DMA_IsActiveFlag_FE3,
LL_DMA_IsActiveFlag_FE4,
LL_DMA_IsActiveFlag_FE5,
LL_DMA_IsActiveFlag_FE6,
LL_DMA_IsActiveFlag_FE7,
};
void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, u32_t id)
{
LOG_INF("tc: %d, ht: %d, te: %d, dme: %d, fe: %d",
func_ll_is_active_tc[id](dma),
func_ll_is_active_ht[id](dma),
func_ll_is_active_te[id](dma),
func_ll_is_active_dme[id](dma),
func_ll_is_active_fe[id](dma));
}
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, u32_t id)
{
func_ll_clear_te[id](dma);
func_ll_clear_dme[id](dma);
func_ll_clear_fe[id](dma);
}
bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, u32_t id)
{
if (func_ll_is_active_fe[id](dma)) {
return true;
}
return false;
}
bool stm32_dma_is_unexpected_irq_happened(DMA_TypeDef *dma, u32_t id)
{
if (func_ll_is_active_fe[id](dma)) {
LOG_ERR("FiFo error.");
stm32_dma_dump_stream_irq(dma, id);
stm32_dma_clear_stream_irq(dma, id);
return true;
}
return false;
}
void stm32_dma_enable_stream(DMA_TypeDef *dma, u32_t id)
{
LL_DMA_EnableStream(dma, table_ll_stream[id]);
}
int stm32_dma_disable_stream(DMA_TypeDef *dma, u32_t id)
{
if (!LL_DMA_IsEnabledStream(dma, table_ll_stream[id])) {
return 0;
}
LL_DMA_DisableStream(dma, table_ll_stream[id]);
return -EAGAIN;
}
void stm32_dma_enable_fifo(DMA_TypeDef *dma, u32_t id)
{
LL_DMA_EnableFifoMode(dma, table_ll_stream[id]);
LL_DMA_EnableIT_FE(dma, table_ll_stream[id]);
LL_DMA_SetFIFOThreshold(dma, table_ll_stream[id],
LL_DMA_FIFOTHRESHOLD_FULL);
}
void stm32_dma_disable_fifo_irq(DMA_TypeDef *dma, u32_t id)
{
LL_DMA_DisableIT_FE(dma, table_ll_stream[id]);
}
void stm32_dma_config_channel_function(DMA_TypeDef *dma, u32_t id, u32_t slot)
{
LL_DMA_SetChannelSelection(dma, table_ll_stream[id],
table_ll_channel[slot]);
}
u32_t stm32_dma_get_mburst(struct dma_config *config, bool source_periph)
{
u32_t memory_burst;
if (source_periph) {
memory_burst = config->dest_burst_length;
} else {
memory_burst = config->source_burst_length;
}
switch (memory_burst) {
case 1:
return LL_DMA_MBURST_SINGLE;
case 4:
return LL_DMA_MBURST_INC4;
case 8:
return LL_DMA_MBURST_INC8;
case 16:
return LL_DMA_MBURST_INC16;
default:
LOG_ERR("Memory burst size error,"
"using single burst as default");
return LL_DMA_MBURST_SINGLE;
}
}
u32_t stm32_dma_get_pburst(struct dma_config *config, bool source_periph)
{
u32_t periph_burst;
if (source_periph) {
periph_burst = config->source_burst_length;
} else {
periph_burst = config->dest_burst_length;
}
switch (periph_burst) {
case 1:
return LL_DMA_PBURST_SINGLE;
case 4:
return LL_DMA_PBURST_INC4;
case 8:
return LL_DMA_PBURST_INC8;
case 16:
return LL_DMA_PBURST_INC16;
default:
LOG_ERR("Peripheral burst size error,"
"using single burst as default");
return LL_DMA_PBURST_SINGLE;
}
}
/*
* This function checks whether the msize, mburst and FIFO level are
* compatible. If they are not compatible, refer to the 'FIFO'
* section in the 'DMA' chapter of the Reference Manual for more
* information.
* No 'break' statements are needed since every path of the code returns.
* This function is not responsible for validating the parameters.
*/
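/*
 * Worked example (assuming the 4-word/16-byte stream FIFO described in
 * the reference manual): an INC8 burst of byte-sized data drains 8 bytes
 * from the FIFO in one burst, so the FIFO must be allowed to fill to at
 * least half (8 bytes) before the burst starts; hence only the 1/2 and
 * FULL thresholds are accepted for that combination below.
 */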
bool stm32_dma_check_fifo_mburst(LL_DMA_InitTypeDef *DMAx)
{
u32_t msize = DMAx->MemoryOrM2MDstDataSize;
u32_t fifo_level = DMAx->FIFOThreshold;
u32_t mburst = DMAx->MemBurst;
switch (msize) {
case LL_DMA_MDATAALIGN_BYTE:
switch (mburst) {
case LL_DMA_MBURST_INC4:
return true;
case LL_DMA_MBURST_INC8:
if (fifo_level == LL_DMA_FIFOTHRESHOLD_1_2 ||
fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) {
return true;
} else {
return false;
}
case LL_DMA_MBURST_INC16:
if (fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) {
return true;
} else {
return false;
}
}
case LL_DMA_MDATAALIGN_HALFWORD:
switch (mburst) {
case LL_DMA_MBURST_INC4:
if (fifo_level == LL_DMA_FIFOTHRESHOLD_1_2 ||
fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) {
return true;
} else {
return false;
}
case LL_DMA_MBURST_INC8:
if (fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) {
return true;
} else {
return false;
}
case LL_DMA_MBURST_INC16:
return false;
}
case LL_DMA_MDATAALIGN_WORD:
if (mburst == LL_DMA_MBURST_INC4 &&
fifo_level == LL_DMA_FIFOTHRESHOLD_FULL) {
return true;
} else {
return false;
}
default:
return false;
}
}
u32_t stm32_dma_get_fifo_threshold(u16_t fifo_mode_control)
{
switch (fifo_mode_control) {
case 0:
return LL_DMA_FIFOTHRESHOLD_1_4;
case 1:
return LL_DMA_FIFOTHRESHOLD_1_2;
case 2:
return LL_DMA_FIFOTHRESHOLD_3_4;
case 3:
return LL_DMA_FIFOTHRESHOLD_FULL;
default:
LOG_WRN("FIFO threshold parameter error, reset to 1/4");
return LL_DMA_FIFOTHRESHOLD_1_4;
}
}

153 drivers/dma/dma_stm32_v2.c Normal file

@ -0,0 +1,153 @@
/*
* Copyright (c) 2019 Song Qiang <songqiang1304521@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief DMA low level driver implementation for F0/F1/F3/L0/L4 series SoCs.
*/
#include <drivers/dma.h>
#include <soc.h>
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_stm32_v2);
u32_t table_ll_stream[] = {
LL_DMA_CHANNEL_1,
LL_DMA_CHANNEL_2,
LL_DMA_CHANNEL_3,
LL_DMA_CHANNEL_4,
LL_DMA_CHANNEL_5,
LL_DMA_CHANNEL_6,
LL_DMA_CHANNEL_7,
};
void (*func_ll_clear_ht[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_HT1,
LL_DMA_ClearFlag_HT2,
LL_DMA_ClearFlag_HT3,
LL_DMA_ClearFlag_HT4,
LL_DMA_ClearFlag_HT5,
LL_DMA_ClearFlag_HT6,
LL_DMA_ClearFlag_HT7,
};
void (*func_ll_clear_tc[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_TC1,
LL_DMA_ClearFlag_TC2,
LL_DMA_ClearFlag_TC3,
LL_DMA_ClearFlag_TC4,
LL_DMA_ClearFlag_TC5,
LL_DMA_ClearFlag_TC6,
LL_DMA_ClearFlag_TC7,
};
u32_t (*func_ll_is_active_ht[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_HT1,
LL_DMA_IsActiveFlag_HT2,
LL_DMA_IsActiveFlag_HT3,
LL_DMA_IsActiveFlag_HT4,
LL_DMA_IsActiveFlag_HT5,
LL_DMA_IsActiveFlag_HT6,
LL_DMA_IsActiveFlag_HT7,
};
u32_t (*func_ll_is_active_tc[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_TC1,
LL_DMA_IsActiveFlag_TC2,
LL_DMA_IsActiveFlag_TC3,
LL_DMA_IsActiveFlag_TC4,
LL_DMA_IsActiveFlag_TC5,
LL_DMA_IsActiveFlag_TC6,
LL_DMA_IsActiveFlag_TC7,
};
static void (*func_ll_clear_te[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_TE1,
LL_DMA_ClearFlag_TE2,
LL_DMA_ClearFlag_TE3,
LL_DMA_ClearFlag_TE4,
LL_DMA_ClearFlag_TE5,
LL_DMA_ClearFlag_TE6,
LL_DMA_ClearFlag_TE7,
};
static void (*func_ll_clear_gi[])(DMA_TypeDef *DMAx) = {
LL_DMA_ClearFlag_GI1,
LL_DMA_ClearFlag_GI2,
LL_DMA_ClearFlag_GI3,
LL_DMA_ClearFlag_GI4,
LL_DMA_ClearFlag_GI5,
LL_DMA_ClearFlag_GI6,
LL_DMA_ClearFlag_GI7,
};
static u32_t (*func_ll_is_active_te[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_TE1,
LL_DMA_IsActiveFlag_TE2,
LL_DMA_IsActiveFlag_TE3,
LL_DMA_IsActiveFlag_TE4,
LL_DMA_IsActiveFlag_TE5,
LL_DMA_IsActiveFlag_TE6,
LL_DMA_IsActiveFlag_TE7,
};
static u32_t (*func_ll_is_active_gi[])(DMA_TypeDef *DMAx) = {
LL_DMA_IsActiveFlag_GI1,
LL_DMA_IsActiveFlag_GI2,
LL_DMA_IsActiveFlag_GI3,
LL_DMA_IsActiveFlag_GI4,
LL_DMA_IsActiveFlag_GI5,
LL_DMA_IsActiveFlag_GI6,
LL_DMA_IsActiveFlag_GI7,
};
void stm32_dma_dump_stream_irq(DMA_TypeDef *dma, u32_t id)
{
LOG_INF("tc: %d, ht: %d, te: %d, gi: %d",
func_ll_is_active_tc[id](dma),
func_ll_is_active_ht[id](dma),
func_ll_is_active_te[id](dma),
func_ll_is_active_gi[id](dma));
}
void stm32_dma_clear_stream_irq(DMA_TypeDef *dma, u32_t id)
{
func_ll_clear_te[id](dma);
func_ll_clear_gi[id](dma);
}
bool stm32_dma_is_irq_happened(DMA_TypeDef *dma, u32_t id)
{
if (func_ll_is_active_te[id](dma)) {
return true;
}
return false;
}
bool stm32_dma_is_unexpected_irq_happened(DMA_TypeDef *dma, u32_t id)
{
/* Reserved for future extension. */
return false;
}
void stm32_dma_enable_stream(DMA_TypeDef *dma, u32_t id)
{
LL_DMA_EnableChannel(dma, table_ll_stream[id]);
}
int stm32_dma_disable_stream(DMA_TypeDef *dma, u32_t id)
{
if (!LL_DMA_IsEnabledChannel(dma, table_ll_stream[id])) {
return 0;
}
LL_DMA_DisableChannel(dma, table_ll_stream[id]);
return -EAGAIN;
}


@ -1,684 +0,0 @@
/*
* Copyright (c) 2016 Linaro Limited.
*
* SPDX-License-Identifier: Apache-2.0
*
*/
#include <device.h>
#include <drivers/dma.h>
#include <errno.h>
#include <init.h>
#include <stdio.h>
#include <soc.h>
#include <string.h>
#include <sys/util.h>
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_stm32f4x);
#include <clock_control/stm32_clock_control.h>
#define DMA_STM32_MAX_STREAMS 8 /* Number of streams per controller */
#define DMA_STM32_MAX_DEVS 2 /* Number of controllers */
#define DMA_STM32_1 0 /* First DMA controller */
#define DMA_STM32_2 1 /* Second DMA controller */
#define DMA_STM32_IRQ_PRI CONFIG_DMA_0_IRQ_PRI
struct dma_stm32_stream_reg {
/* Shared registers */
u32_t lisr;
u32_t hisr;
u32_t lifcr;
u32_t hifcr;
/* Per stream registers */
u32_t scr;
u32_t sndtr;
u32_t spar;
u32_t sm0ar;
u32_t sm1ar;
u32_t sfcr;
};
struct dma_stm32_stream {
u32_t direction;
struct device *dev;
struct dma_stm32_stream_reg regs;
bool busy;
void *callback_arg;
void (*dma_callback)(void *arg, u32_t id,
int error_code);
};
static struct dma_stm32_device {
u32_t base;
struct device *clk;
struct dma_stm32_stream stream[DMA_STM32_MAX_STREAMS];
bool mem2mem;
} device_data[DMA_STM32_MAX_DEVS];
struct dma_stm32_config {
struct stm32_pclken pclken;
void (*config)(struct dma_stm32_device *);
};
/* DMA burst length */
#define BURST_TRANS_LENGTH_1 0
/* DMA direction */
#define DMA_STM32_DEV_TO_MEM 0
#define DMA_STM32_MEM_TO_DEV 1
#define DMA_STM32_MEM_TO_MEM 2
/* DMA priority level */
#define DMA_STM32_PRIORITY_LOW 0
#define DMA_STM32_PRIORITY_MEDIUM 1
#define DMA_STM32_PRIORITY_HIGH 2
#define DMA_STM32_PRIORITY_VERY_HIGH 3
/* DMA FIFO threshold selection */
#define DMA_STM32_FIFO_THRESHOLD_1QUARTERFULL 0
#define DMA_STM32_FIFO_THRESHOLD_HALFFULL 1
#define DMA_STM32_FIFO_THRESHOLD_3QUARTERSFULL 2
#define DMA_STM32_FIFO_THRESHOLD_FULL 3
/* Maximum data sent in single transfer (Bytes) */
#define DMA_STM32_MAX_DATA_ITEMS 0xffff
#define DMA_STM32_1_BASE 0x40026000
#define DMA_STM32_2_BASE 0x40026400
/* Shared registers */
#define DMA_STM32_LISR 0x00 /* DMA low int status reg */
#define DMA_STM32_HISR 0x04 /* DMA high int status reg */
#define DMA_STM32_LIFCR 0x08 /* DMA low int flag clear reg */
#define DMA_STM32_HIFCR 0x0c /* DMA high int flag clear reg */
#define DMA_STM32_FEI BIT(0) /* FIFO error interrupt */
#define RESERVED_1 BIT(1)
#define DMA_STM32_DMEI BIT(2) /* Direct mode error interrupt */
#define DMA_STM32_TEI BIT(3) /* Transfer error interrupt */
#define DMA_STM32_HTI BIT(4) /* Transfer half complete interrupt */
#define DMA_STM32_TCI BIT(5) /* Transfer complete interrupt */
/* DMA Stream x Configuration Register */
#define DMA_STM32_SCR(x) (0x10 + 0x18 * (x))
#define DMA_STM32_SCR_EN BIT(0) /* Stream Enable */
#define DMA_STM32_SCR_DMEIE BIT(1) /* Direct Mode Err Int En */
#define DMA_STM32_SCR_TEIE BIT(2) /* Transfer Error Int En */
#define DMA_STM32_SCR_HTIE BIT(3) /* Transfer 1/2 Comp Int En */
#define DMA_STM32_SCR_TCIE BIT(4) /* Transfer Comp Int En */
#define DMA_STM32_SCR_PFCTRL BIT(5) /* Peripheral Flow Controller */
#define DMA_STM32_SCR_DIR_MASK GENMASK(7, 6) /* Transfer direction */
#define DMA_STM32_SCR_CIRC BIT(8) /* Circular mode */
#define DMA_STM32_SCR_PINC BIT(9) /* Peripheral increment mode */
#define DMA_STM32_SCR_MINC BIT(10) /* Memory increment mode */
#define DMA_STM32_SCR_PSIZE_MASK GENMASK(12, 11) /* Periph data size */
#define DMA_STM32_SCR_MSIZE_MASK GENMASK(14, 13) /* Memory data size */
#define DMA_STM32_SCR_PINCOS BIT(15) /* Periph inc offset size */
#define DMA_STM32_SCR_PL_MASK GENMASK(17, 16) /* Priority level */
#define DMA_STM32_SCR_DBM BIT(18) /* Double Buffer Mode */
#define DMA_STM32_SCR_CT BIT(19) /* Target in double buffer */
#define DMA_STM32_SCR_PBURST_MASK GENMASK(22, 21) /* Periph burst size */
#define DMA_STM32_SCR_MBURST_MASK GENMASK(24, 23) /* Memory burst size */
/* Setting MACROS */
#define DMA_STM32_SCR_DIR(n) ((n & 0x3) << 6)
#define DMA_STM32_SCR_PSIZE(n) ((n & 0x3) << 11)
#define DMA_STM32_SCR_MSIZE(n) ((n & 0x3) << 13)
#define DMA_STM32_SCR_PL(n) ((n & 0x3) << 16)
#define DMA_STM32_SCR_PBURST(n) ((n & 0x3) << 21)
#define DMA_STM32_SCR_MBURST(n) ((n & 0x3) << 23)
#define DMA_STM32_SCR_REQ(n) ((n & 0x7) << 25)
/* Getting MACROS */
#define DMA_STM32_SCR_PSIZE_GET(n) ((n & DMA_STM32_SCR_PSIZE_MASK) >> 11)
#define DMA_STM32_SCR_CFG_MASK (DMA_STM32_SCR_PINC \
| DMA_STM32_SCR_MINC \
| DMA_STM32_SCR_PINCOS \
| DMA_STM32_SCR_PL_MASK)
#define DMA_STM32_SCR_IRQ_MASK (DMA_STM32_SCR_TCIE \
| DMA_STM32_SCR_TEIE \
| DMA_STM32_SCR_DMEIE)
/* DMA stream x number of data register (len) */
#define DMA_STM32_SNDTR(x) (0x14 + 0x18 * (x))
/* DMA stream peripheral address register (source) */
#define DMA_STM32_SPAR(x) (0x18 + 0x18 * (x))
/* DMA stream x memory 0 address register (destination) */
#define DMA_STM32_SM0AR(x) (0x1c + 0x18 * (x))
/* DMA stream x memory 1 address register (destination - double buffer) */
#define DMA_STM32_SM1AR(x) (0x20 + 0x18 * (x))
/* DMA stream x FIFO control register */
#define DMA_STM32_SFCR(x) (0x24 + 0x18 * (x))
#define DMA_STM32_SFCR_FTH_MASK GENMASK(1, 0) /* FIFO threshold */
#define DMA_STM32_SFCR_DMDIS BIT(2) /* Direct mode disable */
#define DMA_STM32_SFCR_STAT_MASK GENMASK(5, 3) /* FIFO status */
#define RESERVED_6 BIT(6) /* Reserved */
#define DMA_STM32_SFCR_FEIE BIT(7) /* FIFO error interrupt enable */
/* Setting MACROS */
#define DMA_STM32_SFCR_FTH(n) (n & DMA_STM32_SFCR_FTH_MASK)
#define DMA_STM32_SFCR_MASK (DMA_STM32_SFCR_FEIE \
| DMA_STM32_SFCR_DMDIS)
#define LOG_U32 __attribute((__unused__)) u32_t
static void dma_stm32_1_config(struct dma_stm32_device *ddata);
static void dma_stm32_2_config(struct dma_stm32_device *ddata);
static u32_t dma_stm32_read(struct dma_stm32_device *ddata, u32_t reg)
{
return sys_read32(ddata->base + reg);
}
static void dma_stm32_write(struct dma_stm32_device *ddata,
u32_t reg, u32_t val)
{
sys_write32(val, ddata->base + reg);
}
static void dma_stm32_dump_reg(struct dma_stm32_device *ddata, u32_t id)
{
LOG_INF("Using stream: %d\n", id);
LOG_INF("SCR: 0x%x \t(config)\n",
dma_stm32_read(ddata, DMA_STM32_SCR(id)));
LOG_INF("SNDTR: 0x%x \t(length)\n",
dma_stm32_read(ddata, DMA_STM32_SNDTR(id)));
LOG_INF("SPAR: 0x%x \t(source)\n",
dma_stm32_read(ddata, DMA_STM32_SPAR(id)));
LOG_INF("SM0AR: 0x%x \t(destination)\n",
dma_stm32_read(ddata, DMA_STM32_SM0AR(id)));
LOG_INF("SM1AR: 0x%x \t(destination (double buffer mode))\n",
dma_stm32_read(ddata, DMA_STM32_SM1AR(id)));
LOG_INF("SFCR: 0x%x \t(fifo control)\n",
dma_stm32_read(ddata, DMA_STM32_SFCR(id)));
}
static u32_t dma_stm32_irq_status(struct dma_stm32_device *ddata,
u32_t id)
{
u32_t irqs;
if (id & 4) {
irqs = dma_stm32_read(ddata, DMA_STM32_HISR);
} else {
irqs = dma_stm32_read(ddata, DMA_STM32_LISR);
}
return (irqs >> (((id & 2) << 3) | ((id & 1) * 6U)));
}
static void dma_stm32_irq_clear(struct dma_stm32_device *ddata,
u32_t id, u32_t irqs)
{
irqs = irqs << (((id & 2) << 3) | ((id & 1) * 6U));
if (id & 4) {
dma_stm32_write(ddata, DMA_STM32_HIFCR, irqs);
} else {
dma_stm32_write(ddata, DMA_STM32_LIFCR, irqs);
}
}
static void dma_stm32_irq_handler(void *arg, u32_t id)
{
struct device *dev = arg;
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream *stream = &ddata->stream[id];
u32_t irqstatus, config, sfcr;
irqstatus = dma_stm32_irq_status(ddata, id);
config = dma_stm32_read(ddata, DMA_STM32_SCR(id));
sfcr = dma_stm32_read(ddata, DMA_STM32_SFCR(id));
/* Silently ignore spurious transfer half complete IRQ */
if (irqstatus & DMA_STM32_HTI) {
dma_stm32_irq_clear(ddata, id, DMA_STM32_HTI);
return;
}
stream->busy = false;
if ((irqstatus & DMA_STM32_TCI) && (config & DMA_STM32_SCR_TCIE)) {
dma_stm32_irq_clear(ddata, id, DMA_STM32_TCI);
stream->dma_callback(stream->callback_arg, id, 0);
} else {
LOG_ERR("Internal error: IRQ status: 0x%x\n", irqstatus);
dma_stm32_irq_clear(ddata, id, irqstatus);
stream->dma_callback(stream->callback_arg, id, -EIO);
}
}
static int dma_stm32_disable_stream(struct dma_stm32_device *ddata,
u32_t id)
{
u32_t config;
int count = 0;
int ret = 0;
for (;;) {
config = dma_stm32_read(ddata, DMA_STM32_SCR(id));
/* Stream already disabled */
if (!(config & DMA_STM32_SCR_EN)) {
return 0;
}
/* Try to disable stream */
dma_stm32_write(ddata, DMA_STM32_SCR(id),
config &= ~DMA_STM32_SCR_EN);
/* After trying for 5 seconds, give up */
k_sleep(K_SECONDS(5));
if (count++ > (5 * 1000) / 50) {
LOG_ERR("DMA error: Stream in use\n");
return -EBUSY;
}
}
return ret;
}
static int dma_stm32_config_devcpy(struct device *dev, u32_t id,
struct dma_config *config)
{
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream_reg *regs = &ddata->stream[id].regs;
u32_t src_bus_width = dma_width_index(config->source_data_size);
u32_t dst_bus_width = dma_width_index(config->dest_data_size);
u32_t src_burst_size = dma_burst_index(config->source_burst_length);
u32_t dst_burst_size = dma_burst_index(config->dest_burst_length);
enum dma_channel_direction direction = config->channel_direction;
switch (direction) {
case MEMORY_TO_PERIPHERAL:
regs->scr = DMA_STM32_SCR_DIR(DMA_STM32_MEM_TO_DEV) |
DMA_STM32_SCR_PSIZE(dst_bus_width) |
DMA_STM32_SCR_MSIZE(src_bus_width) |
DMA_STM32_SCR_PBURST(dst_burst_size) |
DMA_STM32_SCR_MBURST(src_burst_size) |
DMA_STM32_SCR_REQ(config->dma_slot) |
DMA_STM32_SCR_TCIE | DMA_STM32_SCR_TEIE |
DMA_STM32_SCR_MINC;
break;
case PERIPHERAL_TO_MEMORY:
regs->scr = DMA_STM32_SCR_DIR(DMA_STM32_DEV_TO_MEM) |
DMA_STM32_SCR_PSIZE(src_bus_width) |
DMA_STM32_SCR_MSIZE(dst_bus_width) |
DMA_STM32_SCR_PBURST(src_burst_size) |
DMA_STM32_SCR_MBURST(dst_burst_size) |
DMA_STM32_SCR_REQ(config->dma_slot) |
DMA_STM32_SCR_TCIE | DMA_STM32_SCR_TEIE |
DMA_STM32_SCR_MINC;
break;
default:
LOG_ERR("DMA error: Direction not supported: %d",
direction);
return -EINVAL;
}
if (src_burst_size == BURST_TRANS_LENGTH_1 &&
dst_burst_size == BURST_TRANS_LENGTH_1) {
/* Enable 'direct' mode error IRQ, disable 'FIFO' error IRQ */
regs->scr |= DMA_STM32_SCR_DMEIE;
regs->sfcr &= ~DMA_STM32_SFCR_MASK;
} else {
/* Enable 'FIFO' error IRQ, disable 'direct' mode error IRQ */
regs->sfcr |= DMA_STM32_SFCR_MASK;
regs->scr &= ~DMA_STM32_SCR_DMEIE;
}
return 0;
}
static int dma_stm32_config_memcpy(struct device *dev, u32_t id,
struct dma_config *config)
{
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream_reg *regs = &ddata->stream[id].regs;
u32_t src_bus_width = dma_width_index(config->source_data_size);
u32_t dst_bus_width = dma_width_index(config->dest_data_size);
u32_t src_burst_size = dma_burst_index(config->source_burst_length);
u32_t dst_burst_size = dma_burst_index(config->dest_burst_length);
regs->scr = DMA_STM32_SCR_DIR(DMA_STM32_MEM_TO_MEM) |
DMA_STM32_SCR_PSIZE(src_bus_width) |
DMA_STM32_SCR_MSIZE(dst_bus_width) |
DMA_STM32_SCR_PBURST(src_burst_size) |
DMA_STM32_SCR_MBURST(dst_burst_size) |
DMA_STM32_SCR_MINC | /* Memory increment mode */
DMA_STM32_SCR_PINC | /* Peripheral increment mode */
DMA_STM32_SCR_TCIE | /* Transfer comp IRQ enable */
DMA_STM32_SCR_TEIE; /* Transfer error IRQ enable */
regs->sfcr = DMA_STM32_SFCR_DMDIS | /* Direct mode disable */
DMA_STM32_SFCR_FTH(DMA_STM32_FIFO_THRESHOLD_FULL) |
DMA_STM32_SFCR_FEIE; /* FIFI error IRQ enable */
return 0;
}
static int dma_stm32_config(struct device *dev, u32_t id,
struct dma_config *config)
{
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream *stream = &ddata->stream[id];
struct dma_stm32_stream_reg *regs = &ddata->stream[id].regs;
int ret;
if (id >= DMA_STM32_MAX_STREAMS) {
return -EINVAL;
}
if (stream->busy) {
return -EBUSY;
}
if (config->head_block->block_size > DMA_STM32_MAX_DATA_ITEMS) {
LOG_ERR("DMA error: Data size too big: %d\n",
config->head_block->block_size);
return -EINVAL;
}
if (MEMORY_TO_MEMORY == config->channel_direction && !ddata->mem2mem) {
LOG_ERR("DMA error: Memcopy not supported for device %s",
dev->config->name);
return -EINVAL;
}
stream->busy = true;
stream->dma_callback = config->dma_callback;
stream->direction = config->channel_direction;
stream->callback_arg = config->callback_arg;
if (stream->direction == MEMORY_TO_PERIPHERAL) {
regs->sm0ar = (u32_t)config->head_block->source_address;
regs->spar = (u32_t)config->head_block->dest_address;
} else {
regs->spar = (u32_t)config->head_block->source_address;
regs->sm0ar = (u32_t)config->head_block->dest_address;
}
if (stream->direction == MEMORY_TO_MEMORY) {
ret = dma_stm32_config_memcpy(dev, id, config);
} else {
ret = dma_stm32_config_devcpy(dev, id, config);
}
regs->sndtr = config->head_block->block_size;
return ret;
}
static int dma_stm32_reload(struct device *dev, u32_t id,
u32_t src, u32_t dst, size_t size)
{
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream_reg *regs = &ddata->stream[id].regs;
struct dma_stm32_stream *stream = &ddata->stream[id];
if (id >= DMA_STM32_MAX_STREAMS) {
return -EINVAL;
}
switch (stream->direction) {
case MEMORY_TO_PERIPHERAL:
regs->sm0ar = src;
regs->spar = dst;
break;
case MEMORY_TO_MEMORY:
case PERIPHERAL_TO_MEMORY:
regs->spar = src;
regs->sm0ar = dst;
break;
default:
return -EINVAL;
}
regs->sndtr = size;
return 0;
}
static int dma_stm32_start(struct device *dev, u32_t id)
{
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream_reg *regs = &ddata->stream[id].regs;
u32_t irqstatus;
int ret;
if (id >= DMA_STM32_MAX_STREAMS) {
return -EINVAL;
}
ret = dma_stm32_disable_stream(ddata, id);
if (ret) {
return ret;
}
dma_stm32_write(ddata, DMA_STM32_SCR(id), regs->scr);
dma_stm32_write(ddata, DMA_STM32_SPAR(id), regs->spar);
dma_stm32_write(ddata, DMA_STM32_SM0AR(id), regs->sm0ar);
dma_stm32_write(ddata, DMA_STM32_SFCR(id), regs->sfcr);
dma_stm32_write(ddata, DMA_STM32_SM1AR(id), regs->sm1ar);
dma_stm32_write(ddata, DMA_STM32_SNDTR(id), regs->sndtr);
/* Clear remanent IRQs from previous transfers */
irqstatus = dma_stm32_irq_status(ddata, id);
if (irqstatus) {
dma_stm32_irq_clear(ddata, id, irqstatus);
}
dma_stm32_dump_reg(ddata, id);
/* Push the start button */
dma_stm32_write(ddata, DMA_STM32_SCR(id),
regs->scr | DMA_STM32_SCR_EN);
return 0;
}
static int dma_stm32_stop(struct device *dev, u32_t id)
{
struct dma_stm32_device *ddata = dev->driver_data;
struct dma_stm32_stream *stream = &ddata->stream[id];
u32_t scr, sfcr, irqstatus;
int ret;
if (id >= DMA_STM32_MAX_STREAMS) {
return -EINVAL;
}
/* Disable all IRQs */
scr = dma_stm32_read(ddata, DMA_STM32_SCR(id));
scr &= ~DMA_STM32_SCR_IRQ_MASK;
dma_stm32_write(ddata, DMA_STM32_SCR(id), scr);
sfcr = dma_stm32_read(ddata, DMA_STM32_SFCR(id));
sfcr &= ~DMA_STM32_SFCR_FEIE;
dma_stm32_write(ddata, DMA_STM32_SFCR(id), sfcr);
/* Disable stream */
ret = dma_stm32_disable_stream(ddata, id);
if (ret) {
return ret;
}
/* Clear remanent IRQs from previous transfers */
irqstatus = dma_stm32_irq_status(ddata, id);
if (irqstatus) {
dma_stm32_irq_clear(ddata, id, irqstatus);
}
/* Finally, flag stream as free */
stream->busy = false;
return 0;
}
static int dma_stm32_get_status(struct device *dev, u32_t id,
struct dma_status *stat)
{
struct dma_stm32_device *ddata = dev->driver_data;
if (id >= DMA_STM32_MAX_STREAMS || stat == NULL) {
return -EINVAL;
}
stat->dir = ddata->stream[id].direction;
stat->busy = ddata->stream[id].busy;
stat->pending_length = dma_stm32_read(ddata, DMA_STM32_SNDTR(id));
return 0;
}
static int dma_stm32_init(struct device *dev)
{
struct dma_stm32_device *ddata = dev->driver_data;
const struct dma_stm32_config *cdata = dev->config->config_info;
int i;
for (i = 0; i < DMA_STM32_MAX_STREAMS; i++) {
ddata->stream[i].dev = dev;
ddata->stream[i].busy = false;
}
/* Enable DMA clock */
ddata->clk = device_get_binding(STM32_CLOCK_CONTROL_NAME);
__ASSERT_NO_MSG(ddata->clk);
if (clock_control_on(ddata->clk,
(clock_control_subsys_t *) &cdata->pclken) != 0) {
LOG_ERR("Could not enable DMA clock\n");
return -EIO;
}
/* Set controller specific configuration */
cdata->config(ddata);
return 0;
}
static const struct dma_driver_api dma_funcs = {
.reload = dma_stm32_reload,
.config = dma_stm32_config,
.start = dma_stm32_start,
.stop = dma_stm32_stop,
.get_status = dma_stm32_get_status,
};
const struct dma_stm32_config dma_stm32_1_cdata = {
.pclken = { .bus = STM32_CLOCK_BUS_AHB1,
.enr = LL_AHB1_GRP1_PERIPH_DMA1 },
.config = dma_stm32_1_config,
};
DEVICE_AND_API_INIT(dma_stm32_1, CONFIG_DMA_1_NAME, &dma_stm32_init,
&device_data[DMA_STM32_1], &dma_stm32_1_cdata,
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
(void *)&dma_funcs);
static const struct dma_stm32_config dma_stm32_2_cdata = {
.pclken = { .bus = STM32_CLOCK_BUS_AHB1,
.enr = LL_AHB1_GRP1_PERIPH_DMA2 },
.config = dma_stm32_2_config,
};
DEVICE_AND_API_INIT(dma_stm32_2, CONFIG_DMA_2_NAME, &dma_stm32_init,
&device_data[DMA_STM32_2], &dma_stm32_2_cdata,
POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
(void *)&dma_funcs);
static void dma_stm32_irq_0(void *arg) { dma_stm32_irq_handler(arg, 0); }
static void dma_stm32_irq_1(void *arg) { dma_stm32_irq_handler(arg, 1); }
static void dma_stm32_irq_2(void *arg) { dma_stm32_irq_handler(arg, 2); }
static void dma_stm32_irq_3(void *arg) { dma_stm32_irq_handler(arg, 3); }
static void dma_stm32_irq_4(void *arg) { dma_stm32_irq_handler(arg, 4); }
static void dma_stm32_irq_5(void *arg) { dma_stm32_irq_handler(arg, 5); }
static void dma_stm32_irq_6(void *arg) { dma_stm32_irq_handler(arg, 6); }
static void dma_stm32_irq_7(void *arg) { dma_stm32_irq_handler(arg, 7); }
static void dma_stm32_1_config(struct dma_stm32_device *ddata)
{
ddata->base = DMA_STM32_1_BASE;
ddata->mem2mem = false;
IRQ_CONNECT(DMA1_Stream0_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_0, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream0_IRQn);
IRQ_CONNECT(DMA1_Stream1_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_1, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream1_IRQn);
IRQ_CONNECT(DMA1_Stream2_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_2, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream2_IRQn);
IRQ_CONNECT(DMA1_Stream3_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_3, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream3_IRQn);
IRQ_CONNECT(DMA1_Stream4_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_4, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream4_IRQn);
IRQ_CONNECT(DMA1_Stream5_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_5, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream5_IRQn);
IRQ_CONNECT(DMA1_Stream6_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_6, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream6_IRQn);
IRQ_CONNECT(DMA1_Stream7_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_7, DEVICE_GET(dma_stm32_1), 0);
irq_enable(DMA1_Stream7_IRQn);
}
static void dma_stm32_2_config(struct dma_stm32_device *ddata)
{
ddata->base = DMA_STM32_2_BASE;
ddata->mem2mem = true;
IRQ_CONNECT(DMA2_Stream0_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_0, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream0_IRQn);
IRQ_CONNECT(DMA2_Stream1_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_1, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream1_IRQn);
IRQ_CONNECT(DMA2_Stream2_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_2, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream2_IRQn);
IRQ_CONNECT(DMA2_Stream3_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_3, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream3_IRQn);
IRQ_CONNECT(DMA2_Stream4_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_4, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream4_IRQn);
IRQ_CONNECT(DMA2_Stream5_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_5, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream5_IRQn);
IRQ_CONNECT(DMA2_Stream6_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_6, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream6_IRQn);
IRQ_CONNECT(DMA2_Stream7_IRQn, DMA_STM32_IRQ_PRI,
dma_stm32_irq_7, DEVICE_GET(dma_stm32_2), 0);
irq_enable(DMA2_Stream7_IRQn);
}


@ -107,4 +107,11 @@ config ADC_STM32
endif # ADC
if DMA
config DMA_STM32
default y
endif # DMA
endif # SOC_FAMILY_STM32


@ -31,4 +31,11 @@ config I2C_STM32_V2
endif # I2C_STM32
if DMA_STM32
config DMA_STM32_V2
default y
endif # DMA
endif # SOC_SERIES_STM32F0X


@ -67,6 +67,10 @@
#include <stm32f0xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32f0xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32F0_SOC_H_ */


@ -28,4 +28,11 @@ config I2C_STM32_V1
endif # I2C_STM32
if DMA_STM32
config DMA_STM32_V2
default y
endif # DMA
endif # SOC_SERIES_STM32F1X


@ -67,6 +67,10 @@
#include <stm32f1xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32f1xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32F1_SOC_H_ */


@ -36,4 +36,11 @@ config GPIO_STM32_PORTI
endif # GPIO_STM32
if DMA_STM32
config DMA_STM32_V1
default y
endif # DMA
endif # SOC_SERIES_STM32F2X


@ -58,6 +58,10 @@
#include <stm32f2xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32f2xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32F2_SOC_H_ */


@ -28,4 +28,11 @@ config I2C_STM32_V2
endif # I2C_STM32
if DMA_STM32
config DMA_STM32_V2
default y
endif # DMA
endif # SOC_SERIES_STM32F3X


@ -74,6 +74,10 @@
#include <stm32f3xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32f3xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32F3_SOC_H_ */


@ -34,9 +34,9 @@ config I2C_STM32_V1
endif # I2C_STM32
if DMA
if DMA_STM32
config DMA_STM32F4X
config DMA_STM32_V1
default y
endif # DMA


@ -76,6 +76,10 @@
#include <stm32f4xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32f4xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32F4_SOC_H_ */


@ -50,4 +50,11 @@ config ENTROPY_STM32_RNG
endif # ENTROPY_GENERATOR
if DMA_STM32
config DMA_STM32_V1
default y
endif # DMA
endif # SOC_SERIES_STM32F7X


@ -75,6 +75,10 @@
#include <stm32f7xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32f7xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32F7_SOC_H_ */


@ -25,4 +25,11 @@ config I2C_STM32_V2
endif # I2C_STM32
if DMA_STM32
config DMA_STM32_V2
default y
endif # DMA
endif # SOC_SERIES_STM32L0X


@ -68,6 +68,10 @@
#include <stm32l0xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32l0xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32L0_SOC_H_ */


@ -37,4 +37,11 @@ config ENTROPY_STM32_RNG
endif # ENTROPY_GENERATOR
if DMA_STM32
config DMA_STM32_V2
default y
endif # DMA
endif # SOC_SERIES_STM32L4X


@ -90,6 +90,10 @@
#include <stm32l4xx_ll_adc.h>
#endif
#ifdef CONFIG_DMA_STM32
#include <stm32l4xx_ll_dma.h>
#endif
#endif /* !_ASMLANGUAGE */
#endif /* _STM32L4X_SOC_H_ */