drivers: dma: Introduce driver for NXP's eDMA IP

This commit introduces a driver for NXP's eDMA IP.

The main reasons for introducing a new driver are the following:

	1) The HAL EDMA wrappers don't properly support different
	eDMA versions (e.g: i.MX93 and i.MX8QM). As such, a new
	HAL revision had to be introduced, thus requiring a new
	Zephyr driver.

	2) The eDMA versions found on i.MX93, i.MX8QM, and i.MX8QXP
	don't use the DMAMUX IP (instead, channel MUX-ing is performed
	through an eDMA register in the case of i.MX93).

Signed-off-by: Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>
Author: Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>
Date: 2023-10-02 16:21:53 +03:00
Committed by: Anas Nashif
Parent: c3a185f4ee
Commit: 6abc5921e1
6 changed files with 1277 additions and 0 deletions

drivers/dma/CMakeLists.txt

@@ -39,3 +39,4 @@ zephyr_library_sources_ifdef(CONFIG_DMA_SEDI dma_sedi.c)
zephyr_library_sources_ifdef(CONFIG_DMA_SMARTBOND dma_smartbond.c)
zephyr_library_sources_ifdef(CONFIG_DMA_NXP_SOF_HOST_DMA dma_nxp_sof_host_dma.c)
zephyr_library_sources_ifdef(CONFIG_DMA_EMUL dma_emul.c)
zephyr_library_sources_ifdef(CONFIG_DMA_NXP_EDMA dma_nxp_edma.c)

drivers/dma/Kconfig

@@ -74,4 +74,6 @@ source "drivers/dma/Kconfig.nxp_sof_host_dma"
source "drivers/dma/Kconfig.emul"
source "drivers/dma/Kconfig.nxp_edma"
endif # DMA

drivers/dma/Kconfig.nxp_edma (new file)

@@ -0,0 +1,34 @@
# Copyright 2024 NXP
# SPDX-License-Identifier: Apache-2.0
config DMA_NXP_EDMA
	bool "NXP enhanced Direct Memory Access (eDMA) driver"
	default y
	depends on DT_HAS_NXP_EDMA_ENABLED
	help
	  Enable driver for NXP's eDMA IP.

if DMA_NXP_EDMA

config DMA_NXP_EDMA_ALIGN
	int "Alignment (in bytes) required for the transfers"
	default 8
	help
	  Use this to set the alignment (in bytes) that entities
	  employing this driver should use when adjusting the
	  addresses and sizes of the memory regions involved in
	  the transfer process. This value needs to match one of
	  the possible values for SSIZE and DSIZE, otherwise the
	  driver will return an error upon configuration.

config DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ
	bool "Set if CPU should be interrupted when CITER = BITER / 2"
	default n
	help
	  Enable this configuration if the CPU should be
	  interrupted when CITER = BITER / 2. Using this, the
	  CPU will be interrupted both when CITER = BITER and
	  when CITER = BITER / 2.

endif # DMA_NXP_EDMA
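
As a usage sketch (not part of this commit), a client can query the configured alignment through the generic DMA API and round its buffer sizes accordingly. The helper below is made up for illustration; dma_get_attribute() and ROUND_UP() are the real Zephyr APIs it relies on:

#include <zephyr/drivers/dma.h>
#include <zephyr/sys/util.h>

/* hypothetical helper: round a buffer size up to the alignment the
 * eDMA driver was built with (CONFIG_DMA_NXP_EDMA_ALIGN).
 */
static int edma_round_buffer_size(const struct device *dma_dev,
				  uint32_t size, uint32_t *rounded)
{
	uint32_t align;
	int ret;

	/* the driver reports CONFIG_DMA_NXP_EDMA_ALIGN for this attribute */
	ret = dma_get_attribute(dma_dev, DMA_ATTR_BUFFER_SIZE_ALIGNMENT, &align);
	if (ret < 0) {
		return ret;
	}

	*rounded = ROUND_UP(size, align);

	return 0;
}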

drivers/dma/dma_nxp_edma.c (new file, 669 lines)

@@ -0,0 +1,669 @@
/*
* Copyright 2024 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "dma_nxp_edma.h"
/* TODO list:
* 1) Support for requesting a specific channel.
* 2) Support for checking if DMA transfer is pending when attempting config. (?)
* 3) Support for error interrupt.
* 4) Support for error if buffer overflow/underrun.
* 5) Ideally, HALFMAJOR should be set on a per-channel basis not through a
* config. If not possible, this should be done through a DTS property. Also,
* maybe do the same for INTMAJOR IRQ.
*/
static void edma_isr(const void *parameter)
{
const struct edma_config *cfg;
struct edma_data *data;
struct edma_channel *chan;
int ret;
uint32_t update_size;
chan = (struct edma_channel *)parameter;
cfg = chan->dev->config;
data = chan->dev->data;
if (!EDMA_ChannelRegRead(data->hal_cfg, chan->id, EDMA_TCD_CH_INT)) {
/* skip, interrupt was probably triggered by another channel */
return;
}
/* clear interrupt */
EDMA_ChannelRegUpdate(data->hal_cfg, chan->id,
EDMA_TCD_CH_INT, EDMA_TCD_CH_INT_MASK, 0);
if (chan->cyclic_buffer) {
if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) {
update_size = chan->bsize / 2;
} else {
update_size = chan->bsize;
}
/* TODO: add support for error handling here */
ret = EDMA_CHAN_PRODUCE_CONSUME_A(chan, update_size);
if (ret < 0) {
LOG_ERR("chan %d buffer overflow/underrun", chan->id);
}
}
/* TODO: are there any sanity checks we have to perform before invoking
* the registered callback?
*/
if (chan->cb) {
chan->cb(chan->dev, chan->arg, chan->id, DMA_STATUS_COMPLETE);
}
}
static struct edma_channel *lookup_channel(const struct device *dev,
uint32_t chan_id)
{
struct edma_data *data;
const struct edma_config *cfg;
int i;
data = dev->data;
cfg = dev->config;
/* optimization: if dma-channels property is present then
* the channel data associated with the passed channel ID
* can be found at index chan_id in the array of channels.
*/
if (cfg->contiguous_channels) {
/* check for index out of bounds */
if (chan_id >= data->ctx.dma_channels) {
return NULL;
}
return &data->channels[chan_id];
}
/* channels are passed through the valid-channels property.
* As such, since some channels may be missing we need to
* look through the entire channels array for an ID match.
*/
for (i = 0; i < data->ctx.dma_channels; i++) {
if (data->channels[i].id == chan_id) {
return &data->channels[i];
}
}
return NULL;
}
static int edma_config(const struct device *dev, uint32_t chan_id,
struct dma_config *dma_cfg)
{
struct edma_data *data;
const struct edma_config *cfg;
struct edma_channel *chan;
uint32_t transfer_type;
int ret;
data = dev->data;
cfg = dev->config;
if (!dma_cfg->head_block) {
LOG_ERR("head block shouldn't be NULL");
return -EINVAL;
}
/* validate source data size (SSIZE) */
if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->source_data_size)) {
LOG_ERR("invalid source data size: %d",
dma_cfg->source_data_size);
return -EINVAL;
}
/* validate destination data size (DSIZE) */
if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->dest_data_size)) {
LOG_ERR("invalid destination data size: %d",
dma_cfg->dest_data_size);
return -EINVAL;
}
/* validate configured alignment */
if (!EDMA_TransferWidthIsValid(data->hal_cfg, CONFIG_DMA_NXP_EDMA_ALIGN)) {
LOG_ERR("configured alignment %d is invalid",
CONFIG_DMA_NXP_EDMA_ALIGN);
return -EINVAL;
}
/* Scatter-Gather configurations currently not supported */
if (dma_cfg->block_count != 1) {
LOG_ERR("number of blocks %d not supported", dma_cfg->block_count);
return -ENOTSUP;
}
/* source address shouldn't be NULL */
if (!dma_cfg->head_block->source_address) {
LOG_ERR("source address cannot be NULL");
return -EINVAL;
}
/* destination address shouldn't be NULL */
if (!dma_cfg->head_block->dest_address) {
LOG_ERR("destination address cannot be NULL");
return -EINVAL;
}
/* check source address's (SADDR) alignment with respect to the data size (SSIZE)
*
* Failing to meet this condition will lead to the assertion of the SAE
* bit (see CHn_ES register).
*
* TODO: this will also restrict scenarios such as the following:
* SADDR is 8B aligned and SSIZE is 16B. I've tested this
* scenario and it seems to raise no hardware errors (I'm assuming
* because this doesn't break the 8B boundary of the 64-bit system
* I tested it on). Is there a need to allow such a scenario?
*/
if (dma_cfg->head_block->source_address % dma_cfg->source_data_size) {
LOG_ERR("source address 0x%x alignment doesn't match data size %d",
dma_cfg->head_block->source_address,
dma_cfg->source_data_size);
return -EINVAL;
}
/* check destination address's (DADDR) alignment with respect to the data size (DSIZE)
* Failing to meet this condition will lead to the assertion of the DAE
* bit (see CHn_ES register).
*/
if (dma_cfg->head_block->dest_address % dma_cfg->dest_data_size) {
LOG_ERR("destination address 0x%x alignment doesn't match data size %d",
dma_cfg->head_block->dest_address,
dma_cfg->dest_data_size);
return -EINVAL;
}
/* source burst length should match destination burst length.
* This is because the burst length is the equivalent of NBYTES which
* is used for both the destination and the source.
*/
if (dma_cfg->source_burst_length !=
dma_cfg->dest_burst_length) {
LOG_ERR("source burst length %d doesn't match destination burst length %d",
dma_cfg->source_burst_length,
dma_cfg->dest_burst_length);
return -EINVAL;
}
/* total number of bytes should be a multiple of NBYTES.
*
* This is needed because the EDMA engine performs transfers based
* on CITER (integer value) and NBYTES, thus it has no knowledge of
* the total transfer size. If the total transfer size is not a
* multiple of NBYTES then we'll end up copying the wrong number
* of bytes (since CITER = TOTAL_SIZE / NBYTES). This, of course, raises
* no error in the hardware but it's still wrong.
*/
if (dma_cfg->head_block->block_size % dma_cfg->source_burst_length) {
LOG_ERR("block size %d should be a multiple of NBYTES %d",
dma_cfg->head_block->block_size,
dma_cfg->source_burst_length);
return -EINVAL;
}
/* check if NBYTES is a multiple of MAX(SSIZE, DSIZE).
*
* This stems from the fact that NBYTES needs to be a multiple
* of SSIZE AND DSIZE. If NBYTES is a multiple of MAX(SSIZE, DSIZE)
* then it is guaranteed to satisfy the aforementioned condition (since
* SSIZE and DSIZE are powers of 2).
*
* Failing to meet this condition will lead to the assertion of the
* NCE bit (see CHn_ES register).
*/
if (dma_cfg->source_burst_length %
MAX(dma_cfg->source_data_size, dma_cfg->dest_data_size)) {
LOG_ERR("NBYTES %d should be a multiple of MAX(SSIZE(%d), DSIZE(%d))",
dma_cfg->source_burst_length,
dma_cfg->source_data_size,
dma_cfg->dest_data_size);
return -EINVAL;
}
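/* illustrative numbers for the two checks above (not from real hardware):
* with SSIZE = 2, DSIZE = 8 and NBYTES = 24, 24 % MAX(2, 8) == 0, which
* also guarantees 24 % 2 == 0 and 24 % 8 == 0 (SSIZE and DSIZE are
* powers of 2). With block_size = 768, CITER = BITER = 768 / 24 = 32.
*/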
/* fetch channel data */
chan = lookup_channel(dev, chan_id);
if (!chan) {
LOG_ERR("channel ID %u is not valid", chan_id);
return -EINVAL;
}
/* save the block size for later usage in edma_reload */
chan->bsize = dma_cfg->head_block->block_size;
if (dma_cfg->cyclic) {
chan->cyclic_buffer = true;
chan->stat.read_position = 0;
chan->stat.write_position = 0;
/* ASSUMPTION: for CONSUMER-type channels, the buffer from
* which the engine consumes should be full, while in the
* case of PRODUCER-type channels it should be empty.
*/
switch (dma_cfg->channel_direction) {
case MEMORY_TO_PERIPHERAL:
chan->type = CHAN_TYPE_CONSUMER;
chan->stat.free = 0;
chan->stat.pending_length = chan->bsize;
break;
case PERIPHERAL_TO_MEMORY:
chan->type = CHAN_TYPE_PRODUCER;
chan->stat.pending_length = 0;
chan->stat.free = chan->bsize;
break;
default:
LOG_ERR("unsupported transfer dir %d for cyclic mode",
dma_cfg->channel_direction);
return -ENOTSUP;
}
} else {
chan->cyclic_buffer = false;
}
/* change channel's state to CONFIGURED */
ret = channel_change_state(chan, CHAN_STATE_CONFIGURED);
if (ret < 0) {
LOG_ERR("failed to change channel %d state to CONFIGURED", chan_id);
return ret;
}
ret = get_transfer_type(dma_cfg->channel_direction, &transfer_type);
if (ret < 0) {
return ret;
}
chan->cb = dma_cfg->dma_callback;
chan->arg = dma_cfg->user_data;
/* warning: this sets SOFF and DOFF to SSIZE and DSIZE which are POSITIVE. */
ret = EDMA_ConfigureTransfer(data->hal_cfg, chan_id,
dma_cfg->head_block->source_address,
dma_cfg->head_block->dest_address,
dma_cfg->source_data_size,
dma_cfg->dest_data_size,
dma_cfg->source_burst_length,
dma_cfg->head_block->block_size,
transfer_type);
if (ret < 0) {
LOG_ERR("failed to configure transfer");
return to_std_error(ret);
}
/* TODO: channel MUX should be forced to 0 based on the previous state */
if (EDMA_HAS_MUX(data->hal_cfg)) {
ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, dma_cfg->dma_slot);
if (ret < 0) {
LOG_ERR("failed to set channel MUX");
return to_std_error(ret);
}
}
/* set SLAST and DLAST */
ret = set_slast_dlast(dma_cfg, transfer_type, data, chan_id);
if (ret < 0) {
return ret;
}
/* allow interrupting the CPU when a major cycle is completed.
*
* interesting note: only 1 minor loop is performed per slave peripheral
* DMA request. For instance, if block_size = 768 and burst_size = 192
* we're going to get 4 transfers of 192 bytes. Each of these transfers
* translates to a DMA request made by the slave peripheral.
*/
EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
EDMA_TCD_CSR, EDMA_TCD_CSR_INTMAJOR_MASK, 0);
if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) {
/* if enabled through the above configuration, also
* allow the CPU to be interrupted when CITER = BITER / 2.
*/
EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CSR,
EDMA_TCD_CSR_INTHALF_MASK, 0);
}
/* enable channel interrupt */
irq_enable(chan->irq);
/* dump register status - for debugging purposes */
edma_dump_channel_registers(data, chan_id);
return 0;
}
static int edma_get_status(const struct device *dev, uint32_t chan_id,
struct dma_status *stat)
{
struct edma_data *data;
struct edma_channel *chan;
uint32_t citer, biter, done;
unsigned int key;
data = dev->data;
/* fetch channel data */
chan = lookup_channel(dev, chan_id);
if (!chan) {
LOG_ERR("channel ID %u is not valid", chan_id);
return -EINVAL;
}
if (chan->cyclic_buffer) {
key = irq_lock();
stat->free = chan->stat.free;
stat->pending_length = chan->stat.pending_length;
irq_unlock(key);
} else {
/* note: no locking required here. The DMA interrupts
* have no effect over CITER and BITER.
*/
citer = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER);
biter = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER);
done = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR) &
EDMA_TCD_CH_CSR_DONE_MASK;
if (done) {
stat->free = chan->bsize;
stat->pending_length = 0;
} else {
stat->free = (biter - citer) * (chan->bsize / biter);
stat->pending_length = chan->bsize - stat->free;
}
}
LOG_DBG("free: %d, pending: %d", stat->free, stat->pending_length);
return 0;
}
static int edma_suspend(const struct device *dev, uint32_t chan_id)
{
struct edma_data *data;
const struct edma_config *cfg;
struct edma_channel *chan;
int ret;
data = dev->data;
cfg = dev->config;
/* fetch channel data */
chan = lookup_channel(dev, chan_id);
if (!chan) {
LOG_ERR("channel ID %u is not valid", chan_id);
return -EINVAL;
}
edma_dump_channel_registers(data, chan_id);
/* change channel's state to SUSPENDED */
ret = channel_change_state(chan, CHAN_STATE_SUSPENDED);
if (ret < 0) {
LOG_ERR("failed to change channel %d state to SUSPENDED", chan_id);
return ret;
}
LOG_DBG("suspending channel %u", chan_id);
/* disable HW requests */
EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
EDMA_TCD_CH_CSR, 0, EDMA_TCD_CH_CSR_ERQ_MASK);
return 0;
}
static int edma_stop(const struct device *dev, uint32_t chan_id)
{
struct edma_data *data;
const struct edma_config *cfg;
struct edma_channel *chan;
enum channel_state prev_state;
int ret;
data = dev->data;
cfg = dev->config;
/* fetch channel data */
chan = lookup_channel(dev, chan_id);
if (!chan) {
LOG_ERR("channel ID %u is not valid", chan_id);
return -EINVAL;
}
prev_state = chan->state;
/* change channel's state to STOPPED */
ret = channel_change_state(chan, CHAN_STATE_STOPPED);
if (ret < 0) {
LOG_ERR("failed to change channel %d state to STOPPED", chan_id);
return ret;
}
LOG_DBG("stopping channel %u", chan_id);
if (prev_state == CHAN_STATE_SUSPENDED) {
/* if the channel has been suspended then there's
* no point in disabling the HW requests again. Just
* jump to the channel release operation.
*/
goto out_release_channel;
}
/* disable HW requests */
EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR, 0,
EDMA_TCD_CH_CSR_ERQ_MASK);
out_release_channel:
/* clear the channel MUX so that it can be used by a different peripheral.
*
* note: because the channel is released during dma_stop(),
* dma_start() can no longer be immediately called. This is because
* one needs to re-configure the channel MUX which can only be done
* through dma_config(). As such, if one intends to reuse the current
* configuration then please call dma_suspend() instead of dma_stop().
*/
if (EDMA_HAS_MUX(data->hal_cfg)) {
ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, 0);
if (ret < 0) {
LOG_ERR("failed to set channel MUX");
return to_std_error(ret);
}
}
edma_dump_channel_registers(data, chan_id);
return 0;
}
static int edma_start(const struct device *dev, uint32_t chan_id)
{
struct edma_data *data;
const struct edma_config *cfg;
struct edma_channel *chan;
int ret;
data = dev->data;
cfg = dev->config;
/* fetch channel data */
chan = lookup_channel(dev, chan_id);
if (!chan) {
LOG_ERR("channel ID %u is not valid", chan_id);
return -EINVAL;
}
/* change channel's state to STARTED */
ret = channel_change_state(chan, CHAN_STATE_STARTED);
if (ret < 0) {
LOG_ERR("failed to change channel %d state to STARTED", chan_id);
return ret;
}
LOG_DBG("starting channel %u", chan_id);
/* enable HW requests */
EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
EDMA_TCD_CH_CSR, EDMA_TCD_CH_CSR_ERQ_MASK, 0);
return 0;
}
static int edma_reload(const struct device *dev, uint32_t chan_id, uint32_t src,
uint32_t dst, size_t size)
{
struct edma_data *data;
struct edma_channel *chan;
int ret;
unsigned int key;
data = dev->data;
/* fetch channel data */
chan = lookup_channel(dev, chan_id);
if (!chan) {
LOG_ERR("channel ID %u is not valid", chan_id);
return -EINVAL;
}
/* channel needs to be started to allow reloading */
if (chan->state != CHAN_STATE_STARTED) {
LOG_ERR("reload is only supported on started channels");
return -EINVAL;
}
if (chan->cyclic_buffer) {
key = irq_lock();
ret = EDMA_CHAN_PRODUCE_CONSUME_B(chan, size);
irq_unlock(key);
if (ret < 0) {
LOG_ERR("chan %d buffer overflow/underrun", chan_id);
return ret;
}
}
return 0;
}
static int edma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val)
{
switch (type) {
case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
*val = CONFIG_DMA_NXP_EDMA_ALIGN;
break;
case DMA_ATTR_MAX_BLOCK_COUNT:
/* this is restricted to 1 because SG configurations are not supported */
*val = 1;
break;
default:
LOG_ERR("invalid attribute type: %d", type);
return -EINVAL;
}
return 0;
}
static const struct dma_driver_api edma_api = {
.reload = edma_reload,
.config = edma_config,
.start = edma_start,
.stop = edma_stop,
.suspend = edma_suspend,
.resume = edma_start,
.get_status = edma_get_status,
.get_attribute = edma_get_attribute,
};
static int edma_init(const struct device *dev)
{
const struct edma_config *cfg;
struct edma_data *data;
mm_reg_t regmap;
data = dev->data;
cfg = dev->config;
/* map instance MMIO */
device_map(&regmap, cfg->regmap_phys, cfg->regmap_size, K_MEM_CACHE_NONE);
/* overwrite physical address set in the HAL configuration.
* We can down-cast the virtual address to a 32-bit address because
* we know we're working with 32-bit addresses only.
*/
data->hal_cfg->regmap = (uint32_t)POINTER_TO_UINT(regmap);
cfg->irq_config();
/* dma_request_channel() uses this variable to keep track of the
* available channels. As such, it needs to be initialized to 0,
* which signifies that all channels are initially available.
*/
data->channel_flags = ATOMIC_INIT(0);
data->ctx.atomic = &data->channel_flags;
return 0;
}
/* a few comments about the BUILD_ASSERT statements:
* 1) dma-channels and valid-channels should be mutually exclusive.
* This means that you specify one or the other; there's no real
* need to have both of them.
* 2) Number of channels should match the number of interrupts for
* said channels (TODO: what about error interrupts?)
* 3) The channel-mux property shouldn't be specified unless
* the eDMA is MUX-capable (signaled via the EDMA_HAS_CHAN_MUX
* configuration).
*/
#define EDMA_INIT(inst) \
\
BUILD_ASSERT(!DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels) || \
!DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), valid_channels), \
"dma_channels and valid_channels are mutually exclusive"); \
\
BUILD_ASSERT(DT_INST_PROP_OR(inst, dma_channels, 0) == \
DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)) || \
DT_INST_PROP_LEN_OR(inst, valid_channels, 0) == \
DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)), \
"number of interrupts needs to match number of channels"); \
\
BUILD_ASSERT(DT_PROP_OR(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index, 0) < \
ARRAY_SIZE(s_edmaConfigs), \
"HAL configuration index out of bounds"); \
\
static struct edma_channel channels_##inst[] = EDMA_CHANNEL_ARRAY_GET(inst); \
\
static void interrupt_config_function_##inst(void) \
{ \
EDMA_CONNECT_INTERRUPTS(inst); \
} \
\
static struct edma_config edma_config_##inst = { \
.regmap_phys = DT_INST_REG_ADDR(inst), \
.regmap_size = DT_INST_REG_SIZE(inst), \
.irq_config = interrupt_config_function_##inst, \
.contiguous_channels = EDMA_CHANS_ARE_CONTIGUOUS(inst), \
}; \
\
static struct edma_data edma_data_##inst = { \
.channels = channels_##inst, \
.ctx.dma_channels = ARRAY_SIZE(channels_##inst), \
.ctx.magic = DMA_MAGIC, \
.hal_cfg = &EDMA_HAL_CFG_GET(inst), \
}; \
\
DEVICE_DT_INST_DEFINE(inst, &edma_init, NULL, \
&edma_data_##inst, &edma_config_##inst, \
PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY, \
&edma_api);
DT_INST_FOREACH_STATUS_OKAY(EDMA_INIT);
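
As a usage sketch (not part of this commit), configuring and starting a cyclic MEMORY_TO_PERIPHERAL transfer through the generic DMA API could look roughly like the code below. The channel number, dma_slot value, FIFO address, and burst/block sizes are illustrative placeholders, not values from a real board:

#include <zephyr/drivers/dma.h>

#define BUF_SIZE 768 /* multiple of the 192-byte burst (NBYTES) */

static uint8_t buf[BUF_SIZE] __aligned(CONFIG_DMA_NXP_EDMA_ALIGN);

static void tx_callback(const struct device *dev, void *user_data,
			uint32_t channel, int status)
{
	ARG_UNUSED(user_data);
	ARG_UNUSED(status);

	/* refill the buffer here, then let the driver know data was
	 * produced (EDMA_CHAN_PRODUCE_CONSUME_B); BUF_SIZE / 2 assumes
	 * CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ=y, otherwise pass
	 * BUF_SIZE.
	 */
	dma_reload(dev, channel, 0, 0, BUF_SIZE / 2);
}

static int start_cyclic_tx(const struct device *dma_dev)
{
	struct dma_block_config block = {
		.source_address = (uint32_t)(uintptr_t)buf,
		.dest_address = 0x42000000, /* placeholder peripheral FIFO */
		.block_size = BUF_SIZE,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_PERIPHERAL,
		.dma_slot = 5, /* placeholder channel MUX value */
		.source_data_size = 4,
		.dest_data_size = 4,
		.source_burst_length = 192,
		.dest_burst_length = 192,
		.block_count = 1,
		.cyclic = 1,
		.head_block = &block,
		.dma_callback = tx_callback,
	};
	int ret;

	/* channel 0 becomes a CONSUMER channel with a full buffer */
	ret = dma_config(dma_dev, 0, &cfg);
	if (ret < 0) {
		return ret;
	}

	return dma_start(dma_dev, 0);
}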

drivers/dma/dma_nxp_edma.h (new file, 530 lines)

@@ -0,0 +1,530 @@
/*
* Copyright 2024 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_
#define ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_
#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include "fsl_edma_soc_rev2.h"
LOG_MODULE_REGISTER(nxp_edma);
/* used for driver binding */
#define DT_DRV_COMPAT nxp_edma
/* workaround the fact that device_map() is not defined for SoCs with no MMU */
#ifndef DEVICE_MMIO_IS_IN_RAM
#define device_map(virt, phys, size, flags) *(virt) = (phys)
#endif /* DEVICE_MMIO_IS_IN_RAM */
/* macros used to parse DTS properties */
/* used in conjunction with LISTIFY which expects F to also take a variable
* number of arguments. Since IDENTITY doesn't do that we need to use a version
* of it which also takes a variable number of arguments.
*/
#define IDENTITY_VARGS(V, ...) IDENTITY(V)
/* used to generate an array of indexes for the channels */
#define _EDMA_CHANNEL_INDEX_ARRAY(inst)\
LISTIFY(DT_INST_PROP_LEN_OR(inst, valid_channels, 0), IDENTITY_VARGS, (,))
/* used to generate an array of indexes for the channels - this is different
* from _EDMA_CHANNEL_INDEX_ARRAY because the number of channels is passed
* explicitly through dma-channels so no need to deduce it from the length
* of the valid-channels property.
*/
#define _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)\
LISTIFY(DT_INST_PROP_OR(inst, dma_channels, 0), IDENTITY_VARGS, (,))
/* used to generate an array of indexes for the interrupt */
#define _EDMA_INT_INDEX_ARRAY(inst)\
LISTIFY(DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)), IDENTITY_VARGS, (,))
/* used to register an ISR/arg pair. TODO: should we also use the priority? */
#define _EDMA_INT_CONNECT(idx, inst) \
IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, idx, irq), \
0, edma_isr, \
&channels_##inst[idx], 0)
/* used to declare a struct edma_channel by the non-explicit macro suite */
#define _EDMA_CHANNEL_DECLARE(idx, inst) \
{ \
.id = DT_INST_PROP_BY_IDX(inst, valid_channels, idx), \
.dev = DEVICE_DT_INST_GET(inst), \
.irq = DT_INST_IRQ_BY_IDX(inst, idx, irq), \
}
/* used to declare a struct edma_channel by the explicit macro suite */
#define _EDMA_CHANNEL_DECLARE_EXPLICIT(idx, inst) \
{ \
.id = idx, \
.dev = DEVICE_DT_INST_GET(inst), \
.irq = DT_INST_IRQ_BY_IDX(inst, idx, irq), \
}
/* used to create an array of channel IDs via the valid-channels property */
#define _EDMA_CHANNEL_ARRAY(inst) \
{ FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE, (,), \
inst, _EDMA_CHANNEL_INDEX_ARRAY(inst)) }
/* used to create an array of channel IDs via the dma-channels property */
#define _EDMA_CHANNEL_ARRAY_EXPLICIT(inst) \
{ FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE_EXPLICIT, (,), inst, \
_EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)) }
/* used to construct the channel array based on the specified property:
* dma-channels or valid-channels.
*/
#define EDMA_CHANNEL_ARRAY_GET(inst) \
COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels), \
(_EDMA_CHANNEL_ARRAY_EXPLICIT(inst)), \
(_EDMA_CHANNEL_ARRAY(inst)))
#define EDMA_HAL_CFG_GET(inst) \
COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index), \
(s_edmaConfigs[DT_INST_PROP(inst, hal_cfg_index)]), \
(s_edmaConfigs[0]))
/* used to register edma_isr for all specified interrupts */
#define EDMA_CONNECT_INTERRUPTS(inst) \
FOR_EACH_FIXED_ARG(_EDMA_INT_CONNECT, (;), \
inst, _EDMA_INT_INDEX_ARRAY(inst))
#define EDMA_CHANS_ARE_CONTIGUOUS(inst)\
DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels)
/* utility macros */
/* a few words about EDMA_CHAN_PRODUCE_CONSUME_{A/B}:
* - in the context of cyclic buffers we introduce
* the concepts of consumer and producer channels.
*
* - a consumer channel is a channel for which the
* DMA copies data from a buffer, thus leading to
* less data in said buffer (data is consumed with
* each transfer).
*
* - a producer channel is a channel for which the
* DMA copies data into a buffer, thus leading to
* more data in said buffer (data is produced with
* each transfer).
*
* - for consumer channels, each DMA interrupt will
* signal that an amount of data has been consumed
* from the buffer (half of the buffer size if
* HALFMAJOR is enabled, the whole buffer otherwise).
*
* - for producer channels, each DMA interrupt will
* signal that an amount of data has been added
* to the buffer.
*
* - to signal this, the ISR uses EDMA_CHAN_PRODUCE_CONSUME_A
* which will "consume" data from the buffer for
* consumer channels and "produce" data for
* producer channels.
*
* - since the upper layers using this driver need
* to let the EDMA driver know whenever they've produced
* (in the case of consumer channels) or consumed
* data (in the case of producer channels) they can
* do so through the reload() function.
*
* - reload() uses EDMA_CHAN_PRODUCE_CONSUME_B which
* for consumer channels will "produce" data and
* "consume" data for producer channels, thus letting
* the driver know what action the upper layer has
* performed (if the channel is a consumer it's only
* natural that the upper layer will write/produce more
* data to the buffer. The same rationale applies to
* producer channels).
*
* - EDMA_CHAN_PRODUCE_CONSUME_B is just the opposite
* of EDMA_CHAN_PRODUCE_CONSUME_A. If one produces
* data, the other will consume and vice-versa.
*
* - all of this information is valid only in the
* context of cyclic buffers. If this behaviour is
* not enabled, querying the status will simply
* resolve to querying CITER and BITER.
*/
#define EDMA_CHAN_PRODUCE_CONSUME_A(chan, size)\
((chan)->type == CHAN_TYPE_CONSUMER ?\
edma_chan_cyclic_consume(chan, size) :\
edma_chan_cyclic_produce(chan, size))
#define EDMA_CHAN_PRODUCE_CONSUME_B(chan, size)\
((chan)->type == CHAN_TYPE_CONSUMER ?\
edma_chan_cyclic_produce(chan, size) :\
edma_chan_cyclic_consume(chan, size))
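/* illustrative pairing, assuming a CONSUMER channel (MEMORY_TO_PERIPHERAL):
* edma_isr() invokes EDMA_CHAN_PRODUCE_CONSUME_A(), which resolves to
* edma_chan_cyclic_consume() (the engine drained data from the buffer),
* while dma_reload() invokes EDMA_CHAN_PRODUCE_CONSUME_B(), which resolves
* to edma_chan_cyclic_produce() (the application wrote new data into the
* buffer). For PRODUCER channels the resolution is mirrored.
*/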
enum channel_type {
CHAN_TYPE_CONSUMER = 0,
CHAN_TYPE_PRODUCER,
};
enum channel_state {
CHAN_STATE_INIT = 0,
CHAN_STATE_CONFIGURED,
CHAN_STATE_STARTED,
CHAN_STATE_STOPPED,
CHAN_STATE_SUSPENDED,
};
struct edma_channel {
/* channel ID, needs to be the same as the hardware channel ID */
uint32_t id;
/* pointer to device representing the EDMA instance, used by edma_isr */
const struct device *dev;
/* current state of the channel */
enum channel_state state;
/* type of the channel (PRODUCER/CONSUMER) - only applicable to cyclic
* buffer configurations.
*/
enum channel_type type;
/* argument passed to the user-defined DMA callback */
void *arg;
/* user-defined callback, called at the end of a channel's interrupt
* handling.
*/
dma_callback_t cb;
/* INTID associated with the channel */
int irq;
/* the channel's status */
struct dma_status stat;
/* cyclic buffer size - currently, this is set to head_block's size */
uint32_t bsize;
/* set to true if the channel uses a cyclic buffer configuration */
bool cyclic_buffer;
};
struct edma_data {
/* this needs to be the first member */
struct dma_context ctx;
mm_reg_t regmap;
struct edma_channel *channels;
atomic_t channel_flags;
edma_config_t *hal_cfg;
};
struct edma_config {
uint32_t regmap_phys;
uint32_t regmap_size;
void (*irq_config)(void);
/* true if channels are contiguous. The channels may not be contiguous
* if the valid-channels property is used instead of dma-channels. This
* is used to improve the time complexity of the channel lookup
* function.
*/
bool contiguous_channels;
};
static inline int channel_change_state(struct edma_channel *chan,
enum channel_state next)
{
enum channel_state prev = chan->state;
LOG_DBG("attempting to change state from %d to %d for channel %d", prev, next, chan->id);
/* validate transition */
switch (prev) {
case CHAN_STATE_INIT:
if (next != CHAN_STATE_CONFIGURED) {
return -EPERM;
}
break;
case CHAN_STATE_CONFIGURED:
if (next != CHAN_STATE_STARTED) {
return -EPERM;
}
break;
case CHAN_STATE_STARTED:
if (next != CHAN_STATE_STOPPED &&
next != CHAN_STATE_SUSPENDED) {
return -EPERM;
}
break;
case CHAN_STATE_STOPPED:
if (next != CHAN_STATE_CONFIGURED) {
return -EPERM;
}
break;
case CHAN_STATE_SUSPENDED:
if (next != CHAN_STATE_STARTED &&
next != CHAN_STATE_STOPPED) {
return -EPERM;
}
break;
default:
LOG_ERR("invalid channel previous state: %d", prev);
return -EINVAL;
}
/* transition OK, proceed */
chan->state = next;
return 0;
}
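/* summary of the transitions validated above:
*
*   INIT       -> CONFIGURED
*   CONFIGURED -> STARTED
*   STARTED    -> STOPPED | SUSPENDED
*   SUSPENDED  -> STARTED | STOPPED
*   STOPPED    -> CONFIGURED
*/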
static inline int get_transfer_type(enum dma_channel_direction dir, uint32_t *type)
{
switch (dir) {
case MEMORY_TO_MEMORY:
*type = kEDMA_TransferTypeM2M;
break;
case MEMORY_TO_PERIPHERAL:
*type = kEDMA_TransferTypeM2P;
break;
case PERIPHERAL_TO_MEMORY:
*type = kEDMA_TransferTypeP2M;
break;
default:
LOG_ERR("invalid channel direction: %d", dir);
return -EINVAL;
}
return 0;
}
static inline bool data_size_is_valid(uint16_t size)
{
switch (size) {
case 1:
case 2:
case 4:
case 8:
case 16:
case 32:
case 64:
break;
default:
return false;
}
return true;
}
/* TODO: we may require setting the channel type through DTS
* or through struct dma_config. For now, we'll only support
* MEMORY_TO_PERIPHERAL and PERIPHERAL_TO_MEMORY directions
* and assume that these are bound to a certain channel type.
*/
static inline int edma_set_channel_type(struct edma_channel *chan,
enum dma_channel_direction dir)
{
switch (dir) {
case MEMORY_TO_PERIPHERAL:
chan->type = CHAN_TYPE_CONSUMER;
break;
case PERIPHERAL_TO_MEMORY:
chan->type = CHAN_TYPE_PRODUCER;
break;
default:
LOG_ERR("unsupported transfer direction: %d", dir);
return -ENOTSUP;
}
return 0;
}
/* this function is used in cyclic buffer configurations. It updates
* the channel's read position based on the number of bytes requested.
* If the number of bytes being read is higher
* than the number of bytes available in the buffer (pending_length)
* this will lead to an error. The main point of this check is to
* provide a way for the user to determine if data is consumed at a
* higher rate than it is being produced.
*
* This function is used in edma_isr() for CONSUMER channels to mark
* that data has been consumed (i.e: data has been transferred to the
* destination) (this is done via EDMA_CHAN_PRODUCE_CONSUME_A that's
* called in edma_isr()). For producer channels, this function is used
* in edma_reload() to mark the fact that the user of the EDMA driver
* has consumed data.
*/
static inline int edma_chan_cyclic_consume(struct edma_channel *chan,
uint32_t bytes)
{
if (bytes > chan->stat.pending_length) {
return -EINVAL;
}
chan->stat.read_position =
(chan->stat.read_position + bytes) % chan->bsize;
if (chan->stat.read_position > chan->stat.write_position) {
chan->stat.free = chan->stat.read_position -
chan->stat.write_position;
} else if (chan->stat.read_position == chan->stat.write_position) {
chan->stat.free = chan->bsize;
} else {
chan->stat.free = chan->bsize -
(chan->stat.write_position - chan->stat.read_position);
}
chan->stat.pending_length = chan->bsize - chan->stat.free;
return 0;
}
/* this function is used in cyclic buffer configurations. It updates
* the channel's write position based on the number of bytes requested.
* If the number of bytes being written is higher
* than the number of free bytes in the buffer this will lead to an error.
* The main point of this check is to provide a way for the user to determine
* if data is produced at a higher rate than it is being consumed.
*
* This function is used in edma_isr() for PRODUCER channels to mark
* that data has been produced (i.e: data has been transferred to the
* destination) (this is done via EDMA_CHAN_PRODUCE_CONSUME_A that's
* called in edma_isr()). For consumer channels, this function is used
* in edma_reload() to mark the fact that the user of the EDMA driver
* has produced data.
*/
static inline int edma_chan_cyclic_produce(struct edma_channel *chan,
uint32_t bytes)
{
if (bytes > chan->stat.free) {
return -EINVAL;
}
chan->stat.write_position =
(chan->stat.write_position + bytes) % chan->bsize;
if (chan->stat.write_position > chan->stat.read_position) {
chan->stat.pending_length = chan->stat.write_position -
chan->stat.read_position;
} else if (chan->stat.write_position == chan->stat.read_position) {
chan->stat.pending_length = chan->bsize;
} else {
chan->stat.pending_length = chan->bsize -
(chan->stat.read_position - chan->stat.write_position);
}
chan->stat.free = chan->bsize - chan->stat.pending_length;
return 0;
}
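/* worked example for the two functions above (illustrative numbers):
* assume bsize = 8 and an initially empty buffer (read_position =
* write_position = 0, free = 8, pending_length = 0).
*
* 1) produce 4 bytes: write_position = 4, pending_length = 4, free = 4.
* 2) consume 2 bytes: read_position = 2, free = 6, pending_length = 2.
* 3) produce 6 bytes: write_position = (4 + 6) % 8 = 2 = read_position,
*    so pending_length = bsize = 8 and free = 0. Equal positions after
*    a produce mean the buffer is full; equal positions after a consume
*    mean it is empty.
*/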
static inline void edma_dump_channel_registers(struct edma_data *data,
uint32_t chan_id)
{
LOG_DBG("dumping channel data for channel %d", chan_id);
LOG_DBG("CH_CSR: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR));
LOG_DBG("CH_ES: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_ES));
LOG_DBG("CH_INT: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_INT));
LOG_DBG("CH_SBR: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_SBR));
LOG_DBG("CH_PRI: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_PRI));
if (EDMA_HAS_MUX(data->hal_cfg)) {
LOG_DBG("CH_MUX: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_MUX));
}
LOG_DBG("TCD_SADDR: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SADDR));
LOG_DBG("TCD_SOFF: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SOFF));
LOG_DBG("TCD_ATTR: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_ATTR));
LOG_DBG("TCD_NBYTES: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_NBYTES));
LOG_DBG("TCD_SLAST_SDA: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA));
LOG_DBG("TCD_DADDR: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DADDR));
LOG_DBG("TCD_DOFF: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DOFF));
LOG_DBG("TCD_CITER: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER));
LOG_DBG("TCD_DLAST_SGA: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA));
LOG_DBG("TCD_CSR: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CSR));
LOG_DBG("TCD_BITER: 0x%x",
EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER));
}
static inline int set_slast_dlast(struct dma_config *dma_cfg,
uint32_t transfer_type,
struct edma_data *data,
uint32_t chan_id)
{
int32_t slast, dlast;
if (transfer_type == kEDMA_TransferTypeP2M) {
slast = 0;
} else {
switch (dma_cfg->head_block->source_addr_adj) {
case DMA_ADDR_ADJ_INCREMENT:
slast = (int32_t)dma_cfg->head_block->block_size;
break;
case DMA_ADDR_ADJ_DECREMENT:
slast = (-1) * (int32_t)dma_cfg->head_block->block_size;
break;
default:
LOG_ERR("unsupported SADDR adjustment: %d",
dma_cfg->head_block->source_addr_adj);
return -EINVAL;
}
}
if (transfer_type == kEDMA_TransferTypeM2P) {
dlast = 0;
} else {
switch (dma_cfg->head_block->dest_addr_adj) {
case DMA_ADDR_ADJ_INCREMENT:
dlast = (int32_t)dma_cfg->head_block->block_size;
break;
case DMA_ADDR_ADJ_DECREMENT:
dlast = (-1) * (int32_t)dma_cfg->head_block->block_size;
break;
default:
LOG_ERR("unsupported DADDR adjustment: %d",
dma_cfg->head_block->dest_addr_adj);
return -EINVAL;
}
}
LOG_DBG("attempting to commit SLAST %d", slast);
LOG_DBG("attempting to commit DLAST %d", dlast);
/* commit configuration */
EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA, slast);
EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA, dlast);
return 0;
}
/* the NXP HAL EDMA driver uses some custom return values
* that need to be converted to standard error codes. This function
* performs exactly this translation.
*/
static inline int to_std_error(int edma_err)
{
switch (edma_err) {
case kStatus_EDMA_InvalidConfiguration:
case kStatus_InvalidArgument:
return -EINVAL;
case kStatus_Busy:
return -EBUSY;
default:
LOG_ERR("unknown EDMA error code: %d", edma_err);
return -EINVAL;
}
}
#endif /* ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_ */

dts/bindings/dma/nxp,edma.yaml (new file)

@@ -0,0 +1,41 @@
# Copyright 2024 NXP
# SPDX-License-Identifier: Apache-2.0
description: NXP enhanced Direct Memory Access (eDMA) node
compatible: "nxp,edma"
include: [dma-controller.yaml, base.yaml]
properties:
  reg:
    required: true

  valid-channels:
    type: array
    description: |
      Use this property to specify which channel indexes are
      to be considered valid. The difference between this
      property and "dma-channels" is the fact that this
      property allows you to have "gaps" between the channel
      indexes. This is useful in cases where you know you're
      not going to be using all of the possible channels, thus
      leading to a more readable DTS. Of course, this property
      and "dma-channels" are mutually exclusive, meaning you
      can't specify both properties as this will lead to a
      BUILD_ASSERT() failure.

  hal-cfg-index:
    type: int
    description: |
      Use this property to specify which HAL configuration
      should be used. In the case of some SoCs (e.g: i.MX93),
      there can be multiple eDMA variants, each of them having
      a different configuration (e.g: i.MX93 eDMA3 has 31 channels,
      i.MX93 eDMA4 has 64 channels, and the two have slightly
      different register layouts). To overcome this issue, the HAL
      exposes an array of configurations called "edma_hal_configs".
      To perform various operations, the HAL uses an eDMA configuration
      which tells it the IP's register layout, the number of channels,
      and various flags and offsets. As such, if there are multiple
      configurations available, the user will have to specify which
      configuration to use through this property. If missing, the
      configuration found at index 0 will be used.
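
For illustration (not part of this commit), a node using this binding could look roughly like the snippet below. The unit address, register size, channel indexes, interrupt encoding, and "#dma-cells" value are made up for the example rather than taken from a real SoC:

edma3: dma-controller@42000000 {
	compatible = "nxp,edma";
	reg = <0x42000000 0x210000>;
	#dma-cells = <2>; /* assumed value; check the SoC's DTS */
	/* three valid channels with a "gap": 0, 1 and 6 */
	valid-channels = <0>, <1>, <6>;
	/* one interrupt per channel, as enforced by the driver's BUILD_ASSERT */
	interrupts = <95 0>, <96 0>, <101 0>;
	hal-cfg-index = <0>;
};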