uart_mcux_lpuart: Enable Asynchronous UART API.

This PR enables the Asynchronous UART API for using the MCUX drivers.
It is tested on the RT1062EVKB.

Signed-off-by: Nickolas Lapp <nickolaslapp@gmail.com>
This commit is contained in:
Nickolas Lapp 2022-04-08 16:55:08 -04:00 committed by David Leach
parent c959ae81d6
commit f1b0b458b0
3 changed files with 639 additions and 2 deletions

View file

@ -8,5 +8,6 @@ config UART_MCUX
depends on HAS_MCUX && CLOCK_CONTROL
select SERIAL_HAS_DRIVER
select SERIAL_SUPPORT_INTERRUPT
select SERIAL_SUPPORT_ASYNC
help
Enable the MCUX uart driver.

View file

@ -8,5 +8,7 @@ config UART_MCUX_LPUART
depends on HAS_MCUX_LPUART && CLOCK_CONTROL
select SERIAL_HAS_DRIVER
select SERIAL_SUPPORT_INTERRUPT
select SERIAL_SUPPORT_ASYNC
select DMA if UART_ASYNC_API
help
Enable the MCUX LPUART driver.

View file

@ -17,6 +17,21 @@
#ifdef CONFIG_PINCTRL
#include <drivers/pinctrl.h>
#endif
#ifdef CONFIG_UART_ASYNC_API
#include <drivers/dma.h>
#endif
#include <logging/log.h>
LOG_MODULE_REGISTER(uart_mcux_lpuart, LOG_LEVEL_ERR);
#ifdef CONFIG_UART_ASYNC_API
/* DMA wiring for one direction (TX or RX) of an LPUART instance. */
struct lpuart_dma_config {
	const struct device *dma_dev;	/* DMA controller servicing this direction */
	const uint32_t dma_channel;	/* channel index on dma_dev */
	struct dma_config dma_cfg;	/* pre-built transfer configuration */
};
#endif /* CONFIG_UART_ASYNC_API */
struct mcux_lpuart_config {
LPUART_Type *base;
@ -30,8 +45,43 @@ struct mcux_lpuart_config {
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_PM)
void (*irq_config_func)(const struct device *dev);
#endif
#ifdef CONFIG_UART_ASYNC_API
const struct lpuart_dma_config rx_dma_config;
const struct lpuart_dma_config tx_dma_config;
void (*async_config_func)(const struct device *dev);
#endif /* CONFIG_UART_ASYNC_API */
};
#ifdef CONFIG_UART_ASYNC_API
/* Runtime state of the asynchronous RX path.
 *
 * counter/offset track how much of buf has been received and how much of
 * that has already been reported to the user via UART_RX_RDY.
 */
struct mcux_lpuart_rx_dma_params {
	struct dma_block_config active_dma_block;	/* block handed to the DMA engine */
	uint8_t *buf;			/* active receive buffer (NULL when RX disabled) */
	size_t buf_len;			/* size of buf in bytes */
	size_t offset;			/* bytes already reported to the user */
	size_t counter;			/* bytes received into buf so far */
	struct k_work_delayable timeout_work;	/* idle-line flush timer */
	size_t timeout_us;		/* RX timeout; SYS_FOREVER_US/0 disables it */
};
/* Runtime state of the asynchronous TX path. */
struct mcux_lpuart_tx_dma_params {
	struct dma_block_config active_dma_block;	/* block handed to the DMA engine */
	const uint8_t *buf;		/* buffer currently being transmitted (NULL when idle) */
	size_t buf_len;			/* length of the in-flight transfer */
	struct k_work_delayable timeout_work;	/* aborts the transfer on timeout */
	size_t timeout_us;		/* TX timeout; SYS_FOREVER_US/0 disables it */
};
/* Aggregate async-API state stored in the driver's per-instance data. */
struct mcux_lpuart_async_data {
	const struct device *uart_dev;	/* back-pointer for work-item handlers */
	struct mcux_lpuart_tx_dma_params tx_dma_params;
	struct mcux_lpuart_rx_dma_params rx_dma_params;
	uint8_t *next_rx_buffer;	/* buffer queued via uart_rx_buf_rsp() */
	size_t next_rx_buffer_len;
	uart_callback_t user_callback;	/* application event callback (may be NULL) */
	void *user_data;		/* opaque pointer handed back to the callback */
};
#endif
struct mcux_lpuart_data {
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
uart_irq_callback_user_data_t callback;
@ -42,6 +92,9 @@ struct mcux_lpuart_data {
bool tx_poll_stream_on;
bool tx_int_stream_on;
#endif /* CONFIG_PM */
#ifdef CONFIG_UART_ASYNC_API
struct mcux_lpuart_async_data async;
#endif
struct uart_config uart_config;
};
@ -342,8 +395,483 @@ static void mcux_lpuart_isr(const struct device *dev)
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_PM */
static int mcux_lpuart_configure_init(const struct device *dev,
const struct uart_config *cfg)
#ifdef CONFIG_UART_ASYNC_API
/* Arm (or re-arm) @work to fire after @timeout_us microseconds.
 *
 * A timeout of SYS_FOREVER_US or 0 means "no timeout" and leaves the work
 * item untouched.
 */
static inline void async_timer_start(struct k_work_delayable *work, size_t timeout_us)
{
	if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) {
		/* timeout_us is size_t; %d would mismatch the argument type */
		LOG_DBG("async timer started for %u us", (unsigned int)timeout_us);
		k_work_reschedule(work, K_USEC(timeout_us));
	}
}
/* UART IRQ handler used in async mode.
 *
 * Only the idle-line event is of interest here: the receiver has gone
 * quiet, so (re)start the RX timeout so a partially filled buffer is
 * eventually flushed to the user.
 */
static void mcux_lpuart_async_isr(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	const uint32_t status = LPUART_GetStatusFlags(lpuart);

	if (kLPUART_IdleLineFlag & status) {
		async_timer_start(&data->async.rx_dma_params.timeout_work,
				  data->async.rx_dma_params.timeout_us);
		/* The flag must be cleared unconditionally. With NDEBUG the
		 * assert() expression is not evaluated, so clearing inside
		 * assert() would leave the flag set and re-enter this ISR
		 * forever. Clear first, then assert on the saved result.
		 */
		const uint32_t clear_result =
			LPUART_ClearStatusFlags(lpuart, kLPUART_IdleLineFlag);

		assert(clear_result == 0U);
		ARG_UNUSED(clear_result);
	}
}
static void async_user_callback(const struct device *dev, struct uart_event *evt)
{
const struct mcux_lpuart_data *data = dev->data;
if (data->async.user_callback) {
data->async.user_callback(dev, evt, data->async.user_data);
}
}
static void async_evt_tx_done(struct device *dev)
{
struct mcux_lpuart_data *data = dev->data;
(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);
LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len);
struct uart_event event = {
.type = UART_TX_DONE,
.data.tx.buf = data->async.tx_dma_params.buf,
.data.tx.len = data->async.tx_dma_params.buf_len
};
/* Reset TX Buffer */
data->async.tx_dma_params.buf = NULL;
data->async.tx_dma_params.buf_len = 0U;
async_user_callback(dev, &event);
}
/* Report newly received bytes (counter beyond offset) to the user as a
 * UART_RX_RDY event, then advance offset so the same data is not reported
 * twice. A zero-length delta produces no event.
 */
static void async_evt_rx_rdy(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	struct mcux_lpuart_rx_dma_params *dma_params = &data->async.rx_dma_params;

	struct uart_event event = {
		.type = UART_RX_RDY,
		.data.rx.buf = dma_params->buf,
		.data.rx.len = dma_params->counter - dma_params->offset,
		.data.rx.offset = dma_params->offset
	};

	/* len/offset are size_t and buf is a pointer: use matching
	 * conversions instead of %d / (uint32_t)%x, which break on 64-bit.
	 */
	LOG_DBG("RX Ready: (len: %u off: %u buf: %p)",
		(unsigned int)event.data.rx.len,
		(unsigned int)event.data.rx.offset,
		(void *)event.data.rx.buf);

	/* Update the current pos for new data */
	dma_params->offset = dma_params->counter;

	/* Only send event for new data */
	if (event.data.rx.len > 0) {
		async_user_callback(dev, &event);
	}
}
/* Ask the application for the next RX buffer (UART_RX_BUF_REQUEST). */
static void async_evt_rx_buf_request(const struct device *dev)
{
	struct uart_event evt = { .type = UART_RX_BUF_REQUEST };

	async_user_callback(dev, &evt);
}
static void async_evt_rx_buf_release(const struct device *dev)
{
struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
struct uart_event evt = {
.type = UART_RX_BUF_RELEASED,
.data.rx_buf.buf = data->async.rx_dma_params.buf,
};
async_user_callback(dev, &evt);
data->async.rx_dma_params.buf = NULL;
data->async.rx_dma_params.buf_len = 0U;
data->async.rx_dma_params.offset = 0U;
data->async.rx_dma_params.counter = 0U;
}
/* Query the RX DMA channel for its progress and, if bytes have landed in
 * the buffer since the last report, emit UART_RX_RDY for them.
 */
static void mcux_lpuart_async_rx_flush(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	struct dma_status status;

	if (dma_get_status(config->rx_dma_config.dma_dev,
			   config->rx_dma_config.dma_channel,
			   &status) != 0) {
		LOG_ERR("Error getting DMA status");
		return;
	}

	/* Received count = buffer size minus what the DMA has yet to fill. */
	const size_t rx_rcv_len = data->async.rx_dma_params.buf_len -
				  status.pending_length;

	if (rx_rcv_len > data->async.rx_dma_params.counter) {
		data->async.rx_dma_params.counter = rx_rcv_len;
		async_evt_rx_rdy(dev);
	}
}
/* Stop asynchronous reception.
 *
 * Quiesce the receiver, flush bytes already placed in the active buffer,
 * release the active buffer (and any queued next buffer) back to the
 * application, stop the RX DMA channel and emit UART_RX_DISABLED.
 *
 * @return result of dma_stop(): 0 on success, negative errno otherwise.
 */
static int mcux_lpuart_rx_disable(const struct device *dev)
{
	LOG_INF("Disabling UART RX DMA");
	const struct mcux_lpuart_config *config = dev->config;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
	LPUART_Type *lpuart = config->base;
	/* irq_lock() returns an unsigned key; store it with the matching type */
	const unsigned int key = irq_lock();

	LPUART_EnableRx(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.rx_dma_params.timeout_work);
	LPUART_DisableInterrupts(lpuart, kLPUART_IdleLineInterruptEnable);
	LPUART_ClearStatusFlags(lpuart, kLPUART_IdleLineFlag);
	LPUART_EnableRxDMA(lpuart, false);

	/* No active RX buffer, cannot disable */
	if (!data->async.rx_dma_params.buf) {
		LOG_ERR("No buffers to release from RX DMA!");
	} else {
		/* Report any bytes that already landed, then hand the
		 * buffer(s) back to the application.
		 */
		mcux_lpuart_async_rx_flush(dev);
		async_evt_rx_buf_release(dev);
		if (data->async.next_rx_buffer != NULL) {
			data->async.rx_dma_params.buf = data->async.next_rx_buffer;
			data->async.rx_dma_params.buf_len = data->async.next_rx_buffer_len;
			data->async.next_rx_buffer = NULL;
			data->async.next_rx_buffer_len = 0;
			/* Release the next buffer as well */
			async_evt_rx_buf_release(dev);
		}
	}

	const int ret = dma_stop(config->rx_dma_config.dma_dev,
				 config->rx_dma_config.dma_channel);

	if (ret != 0) {
		/* ret is a negative errno — print in decimal, not hex */
		LOG_ERR("Error stopping rx DMA. Reason: %d", ret);
	}
	LOG_DBG("RX: Disabled");

	struct uart_event disabled_event = {
		.type = UART_RX_DISABLED
	};

	async_user_callback(dev, &disabled_event);
	irq_unlock(key);
	return ret;
}
/* Fill in the RX DMA block descriptor: peripheral data register as the
 * source, the user-supplied buffer as the destination, scatter enabled so
 * the driver can chain a replacement buffer later.
 */
static void prepare_rx_dma_block_config(const struct device *dev)
{
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

	assert(rx_dma_params->buf != NULL);
	assert(rx_dma_params->buf_len > 0);

	struct dma_block_config *blk = &rx_dma_params->active_dma_block;

	blk->dest_address = (uint32_t)rx_dma_params->buf;
	blk->source_address = LPUART_GetDataRegisterAddress(lpuart);
	blk->block_size = rx_dma_params->buf_len;
	blk->dest_scatter_en = true;
}
/* Configure the RX DMA channel, start it, and only then point the LPUART
 * at the DMA engine.
 *
 * @return 0 on success, negative errno from dma_config()/dma_start().
 */
static int configure_and_start_rx_dma(
	const struct mcux_lpuart_config *config, struct mcux_lpuart_data *data,
	LPUART_Type *lpuart)
{
	LOG_DBG("Configuring and Starting UART RX DMA");
	int ret = dma_config(config->rx_dma_config.dma_dev,
			     config->rx_dma_config.dma_channel,
			     (struct dma_config *)&config->rx_dma_config.dma_cfg);

	if (ret != 0) {
		LOG_ERR("Failed to Configure RX DMA: err: %d", ret);
		return ret;
	}

	ret = dma_start(config->rx_dma_config.dma_dev, config->rx_dma_config.dma_channel);
	if (ret < 0) {
		LOG_ERR("Failed to start DMA(Rx) Ch %d(%d)",
			config->rx_dma_config.dma_channel,
			ret);
		/* Leave RX DMA disabled on the peripheral: enabling it after
		 * a failed dma_start() would let the UART issue DMA requests
		 * with no channel armed to serve them.
		 */
		return ret;
	}

	LPUART_EnableRxDMA(lpuart, true);
	return ret;
}
static int uart_mcux_lpuart_dma_replace_rx_buffer(const struct device *dev)
{
struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;
const struct mcux_lpuart_config *config = dev->config;
LPUART_Type *lpuart = config->base;
LOG_DBG("Replacing RX buffer, new length: %d", data->async.next_rx_buffer_len);
/* There must be a buffer to replace this one with */
assert(data->async.next_rx_buffer != NULL);
assert(data->async.next_rx_buffer_len != 0U);
const int success = dma_reload(config->rx_dma_config.dma_dev,
config->rx_dma_config.dma_channel,
LPUART_GetDataRegisterAddress(lpuart),
(uint32_t)data->async.next_rx_buffer,
data->async.next_rx_buffer_len);
if (success != 0) {
LOG_ERR("Error %d reloading DMA with next RX buffer", success);
}
return success;
}
/* Shared completion callback for both the TX and RX DMA channels.
 *
 * @param dma_dev      DMA controller that raised the callback.
 * @param callback_arg the UART device, as registered in the dma_cfg.
 * @param channel      channel that completed — used to tell TX from RX.
 * @param error_code   0 on success, otherwise a channel error.
 *
 * TX completion: disable TX DMA on the peripheral and emit UART_TX_DONE.
 * RX completion: the active buffer is full; report it, release it, and
 * promote next_rx_buffer (already loaded into the DMA engine by
 * uart_mcux_lpuart_dma_replace_rx_buffer) to active. If no next buffer was
 * supplied, reception is shut down entirely.
 */
static void dma_callback(const struct device *dma_dev, void *callback_arg, uint32_t channel,
			 int error_code)
{
	struct device *dev = (struct device *)callback_arg;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_data *data = (struct mcux_lpuart_data *)dev->data;

	LOG_DBG("DMA call back on channel %d", channel);
	struct dma_status status;
	const int get_status_result = dma_get_status(dma_dev, channel, &status);

	/* Status is fetched for diagnostics only; failure is non-fatal here. */
	if (get_status_result < 0) {
		LOG_ERR("error on status get: %d", get_status_result);
	} else {
		LOG_DBG("DMA Status: b: %d dir: %d len_remain: %d", status.busy, status.dir,
			status.pending_length);
	}

	if (error_code != 0) {
		LOG_ERR("Got error : %d", error_code);
	}

	if (channel == config->tx_dma_config.dma_channel) {
		LOG_DBG("TX Channel");
		LPUART_EnableTxDMA(lpuart, false);
		async_evt_tx_done(dev);
	} else if (channel == config->rx_dma_config.dma_channel) {
		LOG_DBG("RX Channel");
		struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;

		/* The RX Event indicates DMA transfer is complete and full buffer is available. */
		rx_dma_params->counter = rx_dma_params->buf_len;
		LOG_DBG("Current Buf (%x) full, swapping to new buf: %x",
			(uint32_t)rx_dma_params->buf,
			(uint32_t)data->async.next_rx_buffer);
		/* Order matters: report the data, release the full buffer,
		 * then swap in the queued one.
		 */
		async_evt_rx_rdy(dev);
		async_evt_rx_buf_release(dev);
		rx_dma_params->buf = data->async.next_rx_buffer;
		rx_dma_params->buf_len = data->async.next_rx_buffer_len;
		data->async.next_rx_buffer = NULL;
		data->async.next_rx_buffer_len = 0U;
		/* A new buffer was available (and already loaded into the DMA engine) */
		if (rx_dma_params->buf != NULL &&
		    rx_dma_params->buf_len > 0) {
			/* Request the next buffer */
			async_evt_rx_buf_request(dev);
		} else {
			/* Buffer full without valid next buffer, disable RX DMA */
			LOG_INF("Disabled RX DMA, no valid next buffer ");
			mcux_lpuart_rx_disable(dev);
		}
	} else {
		LOG_ERR("Got unexpected DMA Channel: %d", channel);
	}
}
/* uart_callback_set(): register (or clear, with NULL) the application's
 * async event callback and its user data pointer. Always succeeds.
 */
static int mcux_lpuart_callback_set(const struct device *dev, uart_callback_t callback,
				    void *user_data)
{
	struct mcux_lpuart_data *dev_data = dev->data;

	dev_data->async.user_callback = callback;
	dev_data->async.user_data = user_data;
	return 0;
}
/* uart_tx(): submit @buf (@len bytes) for DMA-driven transmission.
 *
 * @param timeout_us abort the transfer if it has not completed in this
 *                   many microseconds (SYS_FOREVER_US/0 = no timeout).
 * @return 0 on success, -EBUSY if a transfer is in flight, or a negative
 *         errno from the DMA layer.
 */
static int mcux_lpuart_tx(const struct device *dev, const uint8_t *buf, size_t len,
			  int32_t timeout_us)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	/* irq_lock() returns an unsigned key; store it with the matching type */
	const unsigned int key = irq_lock();

	/* Check for an ongoing transfer and abort if it is pending */
	struct dma_status status;
	const int get_status_result = dma_get_status(config->tx_dma_config.dma_dev,
						     config->tx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		irq_unlock(key);
		LOG_ERR("Unable to submit UART DMA Transfer.");
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	int ret;

	LPUART_EnableTxDMA(lpuart, false);
	data->async.tx_dma_params.buf = buf;
	data->async.tx_dma_params.buf_len = len;
	data->async.tx_dma_params.active_dma_block.source_address = (uint32_t)buf;
	data->async.tx_dma_params.active_dma_block.dest_address =
		LPUART_GetDataRegisterAddress(lpuart);
	data->async.tx_dma_params.active_dma_block.block_size = len;
	data->async.tx_dma_params.active_dma_block.next_block = NULL;

	ret = dma_config(config->tx_dma_config.dma_dev,
			 config->tx_dma_config.dma_channel,
			 (struct dma_config *)&config->tx_dma_config.dma_cfg);
	if (ret != 0) {
		/* ret is a negative errno — print in decimal, not hex */
		LOG_ERR("Error configuring UART DMA: %d", ret);
		irq_unlock(key);
		return ret;
	}

	LOG_DBG("Starting UART DMA TX Ch %u", config->tx_dma_config.dma_channel);
	ret = dma_start(config->tx_dma_config.dma_dev,
			config->tx_dma_config.dma_channel);
	if (ret != 0) {
		LOG_ERR("Failed to start DMA(Tx) Ch %d",
			config->tx_dma_config.dma_channel);
		irq_unlock(key);
		return ret;
	}

	/* Only point the peripheral at the DMA engine — and only arm the
	 * timeout — once the channel is actually running.
	 */
	LPUART_EnableTxDMA(lpuart, true);
	async_timer_start(&data->async.tx_dma_params.timeout_work, timeout_us);
	irq_unlock(key);
	return ret;
}
/* uart_tx_abort(): cancel an in-flight DMA transmission.
 *
 * Disables TX DMA, cancels the TX timeout, computes how many bytes were
 * already sent (0 if the status query failed), stops the channel and — if
 * the stop succeeded — reports UART_TX_ABORTED with that count.
 */
static int mcux_lpuart_tx_abort(const struct device *dev)
{
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;

	LPUART_EnableTxDMA(lpuart, false);
	(void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work);

	struct dma_status status;
	const int status_rc = dma_get_status(config->tx_dma_config.dma_dev,
					     config->tx_dma_config.dma_channel,
					     &status);

	if (status_rc < 0) {
		LOG_ERR("Error querying TX DMA Status during abort.");
	}

	size_t bytes_sent = 0;

	if (status_rc == 0) {
		bytes_sent = data->async.tx_dma_params.buf_len - status.pending_length;
	}

	const int stop_rc = dma_stop(config->tx_dma_config.dma_dev,
				     config->tx_dma_config.dma_channel);

	if (stop_rc == 0) {
		struct uart_event tx_aborted_event = {
			.type = UART_TX_ABORTED,
			.data.tx.buf = data->async.tx_dma_params.buf,
			.data.tx.len = bytes_sent
		};

		async_user_callback(dev, &tx_aborted_event);
	}
	return stop_rc;
}
/* uart_rx_enable(): start DMA-driven reception into @buf (@len bytes).
 *
 * @param timeout_us idle timeout after which partial data is flushed to
 *                   the user (SYS_FOREVER_US/0 = flush only when full).
 * @return 0 on success, -EBUSY if reception is already running, or a
 *         negative errno from the DMA layer.
 */
static int mcux_lpuart_rx_enable(const struct device *dev, uint8_t *buf, const size_t len,
				 const int32_t timeout_us)
{
	LOG_DBG("Enabling UART RX DMA");
	struct mcux_lpuart_data *data = dev->data;
	const struct mcux_lpuart_config *config = dev->config;
	LPUART_Type *lpuart = config->base;
	struct mcux_lpuart_rx_dma_params *rx_dma_params = &data->async.rx_dma_params;
	/* irq_lock() returns an unsigned key; store it with the matching type */
	const unsigned int key = irq_lock();

	struct dma_status status;
	const int get_status_result = dma_get_status(config->rx_dma_config.dma_dev,
						     config->rx_dma_config.dma_channel,
						     &status);

	if (get_status_result < 0 || status.busy) {
		LOG_ERR("Unable to start receive on UART.");
		irq_unlock(key);
		return get_status_result < 0 ? get_status_result : -EBUSY;
	}

	rx_dma_params->timeout_us = timeout_us;
	rx_dma_params->buf = buf;
	rx_dma_params->buf_len = len;

	/* Use the cached lpuart alias consistently (it is config->base). */
	LPUART_EnableInterrupts(lpuart, kLPUART_IdleLineInterruptEnable);
	prepare_rx_dma_block_config(dev);
	const int ret = configure_and_start_rx_dma(config, data, lpuart);

	/* Request the next buffer for when this buffer is full for continuous reception */
	async_evt_rx_buf_request(dev);

	/* Clear these status flags as they can prevent the UART device from receiving data */
	LPUART_ClearStatusFlags(lpuart, kLPUART_RxOverrunFlag |
				kLPUART_ParityErrorFlag |
				kLPUART_FramingErrorFlag);
	LPUART_EnableRx(lpuart, true);
	irq_unlock(key);
	return ret;
}
/* uart_rx_buf_rsp(): accept the application's next RX buffer and load it
 * into the DMA engine immediately.
 *
 * @return 0 on success, or the negative errno from the DMA reload —
 *         previously this error was silently dropped.
 */
static int mcux_lpuart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct mcux_lpuart_data *data = dev->data;

	/* Only one buffer may be queued at a time. */
	assert(data->async.next_rx_buffer == NULL);
	assert(data->async.next_rx_buffer_len == 0);
	data->async.next_rx_buffer = buf;
	data->async.next_rx_buffer_len = len;

	/* Propagate reload failures to the caller instead of ignoring them. */
	return uart_mcux_lpuart_dma_replace_rx_buffer(dev);
}
/* Delayable-work handler: the RX line has been idle past the user-supplied
 * timeout, so flush whatever has been received to the application.
 */
static void mcux_lpuart_async_rx_timeout(struct k_work *work)
{
	struct mcux_lpuart_rx_dma_params *params =
		CONTAINER_OF(work, struct mcux_lpuart_rx_dma_params, timeout_work);
	struct mcux_lpuart_async_data *async =
		CONTAINER_OF(params, struct mcux_lpuart_async_data, rx_dma_params);

	LOG_DBG("RX timeout");
	mcux_lpuart_async_rx_flush(async->uart_dev);
}
/* Delayable-work handler: the TX transfer did not finish within the
 * user-supplied timeout, so abort it (which reports UART_TX_ABORTED).
 */
static void mcux_lpuart_async_tx_timeout(struct k_work *work)
{
	struct mcux_lpuart_tx_dma_params *params =
		CONTAINER_OF(work, struct mcux_lpuart_tx_dma_params, timeout_work);
	struct mcux_lpuart_async_data *async =
		CONTAINER_OF(params, struct mcux_lpuart_async_data, tx_dma_params);

	LOG_DBG("TX timeout");
	(void)mcux_lpuart_tx_abort(async->uart_dev);
}
#endif /* CONFIG_UART_ASYNC_API */
static int mcux_lpuart_configure_init(const struct device *dev, const struct uart_config *cfg)
{
const struct mcux_lpuart_config *config = dev->config;
struct mcux_lpuart_data *data = dev->data;
@ -415,10 +943,29 @@ static int mcux_lpuart_configure_init(const struct device *dev,
return -ENOTSUP;
}
#endif
uart_config.baudRate_Bps = cfg->baudrate;
uart_config.enableTx = true;
uart_config.enableRx = true;
#ifdef CONFIG_UART_ASYNC_API
uart_config.rxIdleType = kLPUART_IdleTypeStopBit;
uart_config.rxIdleConfig = kLPUART_IdleCharacter1;
data->async.next_rx_buffer = NULL;
data->async.next_rx_buffer_len = 0;
data->async.uart_dev = dev;
k_work_init_delayable(&data->async.rx_dma_params.timeout_work,
mcux_lpuart_async_rx_timeout);
k_work_init_delayable(&data->async.tx_dma_params.timeout_work,
mcux_lpuart_async_tx_timeout);
/* Disable the UART Receiver until the async API provides a buffer to
* to receive into with rx_enable
*/
uart_config.enableRx = false;
#endif /* CONFIG_UART_ASYNC_API */
LPUART_Init(config->base, &uart_config, clock_freq);
/* update internal uart_config */
@ -488,6 +1035,9 @@ static int mcux_lpuart_init(const struct device *dev)
data->tx_poll_stream_on = false;
data->tx_int_stream_on = false;
#endif
#ifdef CONFIG_UART_ASYNC_API
config->async_config_func(dev);
#endif
return 0;
}
@ -516,6 +1066,14 @@ static const struct uart_driver_api mcux_lpuart_driver_api = {
.irq_update = mcux_lpuart_irq_update,
.irq_callback_set = mcux_lpuart_irq_callback_set,
#endif
#ifdef CONFIG_UART_ASYNC_API
.callback_set = mcux_lpuart_callback_set,
.tx = mcux_lpuart_tx,
.tx_abort = mcux_lpuart_tx_abort,
.rx_enable = mcux_lpuart_rx_enable,
.rx_buf_rsp = mcux_lpuart_rx_buf_rsp,
.rx_disable = mcux_lpuart_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};
@ -542,6 +1100,78 @@ static const struct uart_driver_api mcux_lpuart_driver_api = {
#define MCUX_LPUART_IRQ_DEFINE(n)
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
/* Per-instance TX DMA configuration, taken from the devicetree `dmas`
 * property (`tx` name): single-byte bursts from memory to the LPUART data
 * register, with dma_callback invoked on completion/error.
 */
#define TX_DMA_CONFIG(id) \
	.tx_dma_config = { \
		.dma_dev = \
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, tx)), \
		.dma_channel = \
			DT_INST_DMAS_CELL_BY_NAME(id, tx, mux), \
		.dma_cfg = { \
			.source_burst_length = 1, \
			.dest_burst_length = 1, \
			.source_data_size = 1, \
			.dest_data_size = 1, \
			.complete_callback_en = 1, \
			.error_callback_en = 1, \
			.block_count = 1, \
			.head_block = \
				&mcux_lpuart_##id##_data.async.tx_dma_params.active_dma_block, \
			.channel_direction = MEMORY_TO_PERIPHERAL, \
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME( \
				id, tx, source), \
			.dma_callback = dma_callback, \
			.user_data = (void *)DEVICE_DT_INST_GET(id) \
		}, \
	},
/* Per-instance RX DMA configuration (`rx` name): mirror of TX_DMA_CONFIG
 * in the peripheral-to-memory direction.
 */
#define RX_DMA_CONFIG(id) \
	.rx_dma_config = { \
		.dma_dev = \
			DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, rx)), \
		.dma_channel = \
			DT_INST_DMAS_CELL_BY_NAME(id, rx, mux), \
		.dma_cfg = { \
			.source_burst_length = 1, \
			.dest_burst_length = 1, \
			.source_data_size = 1, \
			.dest_data_size = 1, \
			.complete_callback_en = 1, \
			.error_callback_en = 1, \
			.block_count = 1, \
			.head_block = \
				&mcux_lpuart_##id##_data.async.rx_dma_params.active_dma_block, \
			.channel_direction = PERIPHERAL_TO_MEMORY, \
			.dma_slot = DT_INST_DMAS_CELL_BY_NAME( \
				id, rx, source), \
			.dma_callback = dma_callback, \
			.user_data = (void *)DEVICE_DT_INST_GET(id) \
		}, \
	},
/* Connect IRQ index @i of instance @n to the async ISR and enable it. */
#define MCUX_LPUART_ASYNC_IRQ_INSTALL(n, i) \
	do { \
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, i, irq), \
			    DT_INST_IRQ_BY_IDX(n, i, priority), \
			    mcux_lpuart_async_isr, DEVICE_DT_INST_GET(n), 0); \
		\
		irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq)); \
	} while (0)
/* Define the per-instance async IRQ config function; installs IRQ 0 and,
 * when the devicetree declares it, IRQ 1.
 */
#define MCUX_LPUART_ASYNC_IRQ_DEFINE(n) \
	static void mcux_lpuart_async_config_func_##n(const struct device *dev) \
	{ \
		MCUX_LPUART_ASYNC_IRQ_INSTALL(n, 0); \
		\
		IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 1), \
			   (MCUX_LPUART_ASYNC_IRQ_INSTALL(n, 1);)) \
	}
/* Config-struct initializer hooking up the function defined above. */
#define MCUX_LPUART_ASYNC_IRQ_INIT(n) \
	.async_config_func = mcux_lpuart_async_config_func_##n,
#else
/* Async API disabled: all async hooks expand to nothing. */
#define RX_DMA_CONFIG(n)
#define TX_DMA_CONFIG(n)
#define MCUX_LPUART_ASYNC_IRQ_INIT(n)
#define MCUX_LPUART_ASYNC_IRQ_DEFINE(n)
#endif /* CONFIG_UART_ASYNC_API */
#if CONFIG_PINCTRL
#define PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n);
@ -561,6 +1191,9 @@ static const struct mcux_lpuart_config mcux_lpuart_##n##_config = { \
UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE, \
PINCTRL_INIT(n) \
MCUX_LPUART_IRQ_INIT(n) \
MCUX_LPUART_ASYNC_IRQ_INIT(n) \
RX_DMA_CONFIG(n) \
TX_DMA_CONFIG(n) \
};
#define LPUART_MCUX_INIT(n) \
@ -569,6 +1202,7 @@ static const struct mcux_lpuart_config mcux_lpuart_##n##_config = { \
\
PINCTRL_DEFINE(n) \
MCUX_LPUART_IRQ_DEFINE(n) \
MCUX_LPUART_ASYNC_IRQ_DEFINE(n) \
\
LPUART_MCUX_DECLARE_CFG(n) \
\