drivers: uart_xmc4xxx: Add async support

Adds async UART support for xmc4xxx SoCs.
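An illustrative sketch (not part of this change) of how an application would
exercise the async path; the usic1ch1 node label, buffer size and timeouts
are placeholders:

#include <zephyr/kernel.h>
#include <zephyr/drivers/uart.h>

static uint8_t rx_buf[64];

/* Invoked from the driver's dma callbacks and rx timeout work */
static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		/* previous uart_tx() finished; a chained uart_tx() may be queued here */
		break;
	case UART_RX_RDY:
		/* evt->data.rx.len new bytes at evt->data.rx.buf + evt->data.rx.offset */
		break;
	case UART_RX_BUF_REQUEST:
		/* provide the next buffer with uart_rx_buf_rsp() to keep rx running */
		break;
	default:
		break;
	}
}

int main(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(usic1ch1));

	uart_callback_set(uart, uart_cb, NULL);
	/* 1000 us of rx inactivity flushes partial buffers via UART_RX_RDY */
	uart_rx_enable(uart, rx_buf, sizeof(rx_buf), 1000);
	uart_tx(uart, (const uint8_t *)"hello", 5, SYS_FOREVER_US);
	return 0;
}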

Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
Andriy Gelman 2022-12-15 15:29:26 -05:00 committed by Fabio Baltieri
parent cfeaada65e
commit 30b11260be
3 changed files with 644 additions and 17 deletions


@@ -9,6 +9,8 @@ config UART_XMC4XXX
depends on DT_HAS_INFINEON_XMC4XXX_UART_ENABLED
select SERIAL_HAS_DRIVER
select SERIAL_SUPPORT_INTERRUPT
select SERIAL_SUPPORT_ASYNC if DT_HAS_INFINEON_XMC4XXX_DMA_ENABLED
select DMA if UART_ASYNC_API
help
This option enables the XMC4XXX UART driver for UART_0.
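Note: a minimal application config sketch to opt in (assuming the devicetree
node provides dmas/dma-names; CONFIG_DMA is then selected automatically by
the line above):

CONFIG_SERIAL=y
CONFIG_UART_ASYNC_API=y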


@@ -8,6 +8,7 @@
#define DT_DRV_COMPAT infineon_xmc4xxx_uart
#include <xmc_uart.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/util.h>
@@ -18,11 +19,14 @@
#define USIC_IRQ_MAX 101
#define IRQS_PER_USIC 6
#define CURRENT_BUFFER 0
#define NEXT_BUFFER 1
struct uart_xmc4xxx_config {
XMC_USIC_CH_t *uart;
const struct pinctrl_dev_config *pcfg;
uint8_t input_src;
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
uart_irq_config_func_t irq_config_func;
uint8_t irq_num_tx;
uint8_t irq_num_rx;
@@ -32,14 +36,40 @@ struct uart_xmc4xxx_config {
uint8_t fifo_rx_size;
};
#ifdef CONFIG_UART_ASYNC_API
struct uart_dma_stream {
const struct device *dma_dev;
uint32_t dma_channel;
struct dma_config dma_cfg;
struct dma_block_config blk_cfg;
uint8_t *buffer;
size_t buffer_len;
size_t offset;
size_t counter;
int32_t timeout;
struct k_work_delayable timeout_work;
};
#endif
struct uart_xmc4xxx_data {
XMC_UART_CH_CONFIG_t config;
#if defined(CONFIG_UART_INTERRUPT_DRIVEN)
uart_irq_callback_user_data_t user_cb;
void *user_data;
#endif
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
uint8_t service_request_tx;
uint8_t service_request_rx;
#endif
#if defined(CONFIG_UART_ASYNC_API)
const struct device *dev;
uart_callback_t async_cb;
void *async_user_data;
struct uart_dma_stream dma_rx;
struct uart_dma_stream dma_tx;
uint8_t *rx_next_buffer;
size_t rx_next_buffer_len;
#endif
};
static int uart_xmc4xxx_poll_in(const struct device *dev, unsigned char *c)
@@ -72,16 +102,76 @@ static void uart_xmc4xxx_poll_out(const struct device *dev, unsigned char c)
XMC_UART_CH_Transmit(config->uart, c);
}
#if defined(CONFIG_UART_ASYNC_API)
static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout)
{
if ((timeout != SYS_FOREVER_US) && (timeout != 0)) {
k_work_reschedule(work, K_USEC(timeout));
}
}
static void disable_tx_events(const struct uart_xmc4xxx_config *config)
{
if (config->fifo_tx_size > 0) {
XMC_USIC_CH_TXFIFO_DisableEvent(config->uart,
XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD);
} else {
XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_BUFFER);
}
}
#endif
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
static void enable_tx_events(const struct uart_xmc4xxx_config *config)
{
if (config->fifo_tx_size > 0) {
/* wait till the fifo has at least 1 byte free */
while (XMC_USIC_CH_TXFIFO_IsFull(config->uart)) {
}
XMC_USIC_CH_TXFIFO_EnableEvent(config->uart,
XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD);
} else {
XMC_USIC_CH_EnableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_BUFFER);
}
}
#define NVIC_ICPR_BASE 0xe000e280u
static void clear_pending_interrupt(int irq_num)
{
uint32_t *clearpend = (uint32_t *)(NVIC_ICPR_BASE) + irq_num / 32;
irq_num = irq_num & 0x1f;
/* writing zero has no effect, i.e. we only clear irq_num */
*clearpend = BIT(irq_num);
}
static void uart_xmc4xxx_isr(void *arg)
{
const struct device *dev = arg;
struct uart_xmc4xxx_data *data = dev->data;
#if defined(CONFIG_UART_INTERRUPT_DRIVEN)
if (data->user_cb) {
data->user_cb(dev, data->user_data);
}
#endif
#if defined(CONFIG_UART_ASYNC_API)
const struct uart_xmc4xxx_config *config = dev->config;
unsigned int key = irq_lock();
if (data->dma_rx.buffer_len) {
/* We only need to trigger this irq once to start the timer. Everything else */
/* is handled by the timer callback and dma_rx_callback. Note that we can't */
/* simply disable the event that triggers this irq, since the same */
/* service_request gets routed to the dma. Thus we disable the nvic irq */
/* below. Any pending irq must be cleared before irq_enable() is called. */
irq_disable(config->irq_num_rx);
async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout);
}
irq_unlock(key);
#endif
}
static void uart_xmc4xxx_configure_service_requests(const struct device *dev)
@@ -154,6 +244,10 @@ static void uart_xmc4xxx_irq_rx_enable(const struct device *dev)
const struct uart_xmc4xxx_config *config = dev->config;
uint32_t recv_status;
/* re-enable the IRQ as it may have been disabled during async_rx */
clear_pending_interrupt(config->irq_num_rx);
irq_enable(config->irq_num_rx);
if (config->fifo_rx_size > 0) {
XMC_USIC_CH_RXFIFO_Flush(config->uart);
XMC_USIC_CH_RXFIFO_SetSizeTriggerLimit(config->uart, config->fifo_rx_size, 0);
@@ -177,6 +271,9 @@ static void uart_xmc4xxx_irq_rx_enable(const struct device *dev)
XMC_USIC_CH_EVENT_ALTERNATIVE_RECEIVE);
}
}
#endif
#if defined(CONFIG_UART_INTERRUPT_DRIVEN)
static int uart_xmc4xxx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len)
{
@@ -225,15 +322,11 @@ static void uart_xmc4xxx_irq_tx_enable(const struct device *dev)
const struct uart_xmc4xxx_config *config = dev->config;
const struct uart_xmc4xxx_data *data = dev->data;
clear_pending_interrupt(config->irq_num_tx);
irq_enable(config->irq_num_tx);
enable_tx_events(config);
XMC_USIC_CH_TriggerServiceRequest(config->uart, data->service_request_tx);
}
@@ -293,6 +386,464 @@ static int uart_xmc4xxx_irq_is_pending(const struct device *dev)
}
#endif
#if defined(CONFIG_UART_ASYNC_API)
static inline void async_evt_rx_buf_request(struct uart_xmc4xxx_data *data)
{
struct uart_event evt = {.type = UART_RX_BUF_REQUEST};
if (data->async_cb) {
data->async_cb(data->dev, &evt, data->async_user_data);
}
}
static inline void async_evt_rx_release_buffer(struct uart_xmc4xxx_data *data, int buffer_type)
{
struct uart_event event = {.type = UART_RX_BUF_RELEASED};
if (buffer_type == NEXT_BUFFER && !data->rx_next_buffer) {
return;
}
if (buffer_type == CURRENT_BUFFER && !data->dma_rx.buffer) {
return;
}
if (buffer_type == NEXT_BUFFER) {
event.data.rx_buf.buf = data->rx_next_buffer;
data->rx_next_buffer = NULL;
data->rx_next_buffer_len = 0;
} else {
event.data.rx_buf.buf = data->dma_rx.buffer;
data->dma_rx.buffer = NULL;
data->dma_rx.buffer_len = 0;
}
if (data->async_cb) {
data->async_cb(data->dev, &event, data->async_user_data);
}
}
static inline void async_evt_rx_disabled(struct uart_xmc4xxx_data *data)
{
struct uart_event event = {.type = UART_RX_DISABLED};
data->dma_rx.buffer = NULL;
data->dma_rx.buffer_len = 0;
data->dma_rx.offset = 0;
data->dma_rx.counter = 0;
if (data->async_cb) {
data->async_cb(data->dev, &event, data->async_user_data);
}
}
static inline void async_evt_rx_rdy(struct uart_xmc4xxx_data *data)
{
struct uart_event event = {.type = UART_RX_RDY,
.data.rx.buf = (uint8_t *)data->dma_rx.buffer,
.data.rx.len = data->dma_rx.counter - data->dma_rx.offset,
.data.rx.offset = data->dma_rx.offset};
data->dma_rx.offset = data->dma_rx.counter;
if (event.data.rx.len > 0 && data->async_cb) {
data->async_cb(data->dev, &event, data->async_user_data);
}
}
static inline void async_evt_tx_done(struct uart_xmc4xxx_data *data)
{
struct uart_event event = {.type = UART_TX_DONE,
.data.tx.buf = data->dma_tx.buffer,
.data.tx.len = data->dma_tx.counter};
data->dma_tx.buffer = NULL;
data->dma_tx.buffer_len = 0;
data->dma_tx.counter = 0;
if (data->async_cb) {
data->async_cb(data->dev, &event, data->async_user_data);
}
}
static inline void async_evt_tx_abort(struct uart_xmc4xxx_data *data)
{
struct uart_event event = {.type = UART_TX_ABORTED,
.data.tx.buf = data->dma_tx.buffer,
.data.tx.len = data->dma_tx.counter};
data->dma_tx.buffer = NULL;
data->dma_tx.buffer_len = 0;
data->dma_tx.counter = 0;
if (data->async_cb) {
data->async_cb(data->dev, &event, data->async_user_data);
}
}
static void uart_xmc4xxx_async_rx_timeout(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_dma_stream *rx_stream =
CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work);
struct uart_xmc4xxx_data *data = CONTAINER_OF(rx_stream, struct uart_xmc4xxx_data, dma_rx);
struct dma_status stat;
unsigned int key = irq_lock();
if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
size_t rx_rcv_len = data->dma_rx.buffer_len - stat.pending_length;
if (rx_rcv_len > data->dma_rx.offset) {
data->dma_rx.counter = rx_rcv_len;
async_evt_rx_rdy(data);
}
}
irq_unlock(key);
async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout);
}
static int uart_xmc4xxx_async_tx_abort(const struct device *dev)
{
struct uart_xmc4xxx_data *data = dev->data;
struct dma_status stat;
size_t tx_buffer_len;
unsigned int key = irq_lock();
k_work_cancel_delayable(&data->dma_tx.timeout_work);
tx_buffer_len = data->dma_tx.buffer_len;
if (tx_buffer_len == 0) {
irq_unlock(key);
return -EINVAL;
}
if (!dma_get_status(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &stat)) {
data->dma_tx.counter = tx_buffer_len - stat.pending_length;
}
dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel);
disable_tx_events(dev->config);
async_evt_tx_abort(data);
irq_unlock(key);
return 0;
}
static void uart_xmc4xxx_async_tx_timeout(struct k_work *work)
{
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct uart_dma_stream *tx_stream =
CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work);
struct uart_xmc4xxx_data *data = CONTAINER_OF(tx_stream, struct uart_xmc4xxx_data, dma_tx);
uart_xmc4xxx_async_tx_abort(data->dev);
}
static int uart_xmc4xxx_async_init(const struct device *dev)
{
const struct uart_xmc4xxx_config *config = dev->config;
struct uart_xmc4xxx_data *data = dev->data;
data->dev = dev;
if (data->dma_rx.dma_dev != NULL) {
if (!device_is_ready(data->dma_rx.dma_dev)) {
return -ENODEV;
}
k_work_init_delayable(&data->dma_rx.timeout_work, uart_xmc4xxx_async_rx_timeout);
if (config->fifo_rx_size > 0) {
data->dma_rx.blk_cfg.source_address = (uint32_t)&config->uart->OUTR;
} else {
data->dma_rx.blk_cfg.source_address = (uint32_t)&config->uart->RBUF;
}
data->dma_rx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;
data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg;
data->dma_rx.dma_cfg.user_data = (void *)dev;
}
if (data->dma_tx.dma_dev != NULL) {
if (!device_is_ready(data->dma_tx.dma_dev)) {
return -ENODEV;
}
k_work_init_delayable(&data->dma_tx.timeout_work, uart_xmc4xxx_async_tx_timeout);
if (config->fifo_tx_size > 0) {
data->dma_tx.blk_cfg.dest_address = (uint32_t)&config->uart->IN[0];
} else {
data->dma_tx.blk_cfg.dest_address = (uint32_t)&config->uart->TBUF[0];
}
data->dma_tx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT;
data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;
data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg;
data->dma_tx.dma_cfg.user_data = (void *)dev;
}
return 0;
}
static int uart_xmc4xxx_async_callback_set(const struct device *dev, uart_callback_t callback,
void *user_data)
{
struct uart_xmc4xxx_data *data = dev->data;
data->async_cb = callback;
data->async_user_data = user_data;
return 0;
}
static int uart_xmc4xxx_async_tx(const struct device *dev, const uint8_t *tx_data, size_t buf_size,
int32_t timeout)
{
struct uart_xmc4xxx_data *data = dev->data;
const struct uart_xmc4xxx_config *config = dev->config;
int ret;
/* Assume threads are cooperative (not pre-emptive), so this call cannot be */
/* interrupted by uart_xmc4xxx_async_tx_abort() */
if (data->dma_tx.dma_dev == NULL) {
return -ENODEV;
}
if (tx_data == NULL || buf_size == 0) {
return -EINVAL;
}
/* No need to lock irqs. The ISR uart_xmc4xxx_dma_tx_cb() will only trigger */
/* if dma_tx.buffer_len != 0 */
if (data->dma_tx.buffer_len != 0) {
return -EBUSY;
}
data->dma_tx.buffer = (uint8_t *)tx_data;
data->dma_tx.buffer_len = buf_size;
data->dma_tx.timeout = timeout;
/* set source address */
data->dma_tx.blk_cfg.source_address = (uint32_t)data->dma_tx.buffer;
data->dma_tx.blk_cfg.block_size = data->dma_tx.buffer_len;
ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &data->dma_tx.dma_cfg);
if (ret < 0) {
return ret;
}
/* make sure the tx is not transmitting */
while (!uart_xmc4xxx_irq_tx_ready(dev)) {
}
/* Tx irq is not used in async mode so disable it */
irq_disable(config->irq_num_tx);
enable_tx_events(config);
XMC_USIC_CH_TriggerServiceRequest(config->uart, data->service_request_tx);
async_timer_start(&data->dma_tx.timeout_work, data->dma_tx.timeout);
ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.dma_channel);
if (ret < 0) {
return ret;
}
return ret;
}
static int uart_xmc4xxx_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len,
int32_t timeout)
{
struct uart_xmc4xxx_data *data = dev->data;
int ret;
if (data->dma_rx.dma_dev == NULL) {
return -ENODEV;
}
if (data->dma_rx.buffer_len != 0) {
return -EBUSY;
}
uart_xmc4xxx_irq_rx_disable(dev);
data->dma_rx.buffer = buf;
data->dma_rx.buffer_len = len;
data->dma_rx.timeout = timeout;
data->dma_rx.blk_cfg.dest_address = (uint32_t)data->dma_rx.buffer;
data->dma_rx.blk_cfg.block_size = data->dma_rx.buffer_len;
ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &data->dma_rx.dma_cfg);
if (ret < 0) {
return ret;
}
/* Request buffers before enabling rx. It's unlikely, but we may not */
/* request a new buffer in time (for example if receive buffer size is one byte). */
async_evt_rx_buf_request(data);
uart_xmc4xxx_irq_rx_enable(dev);
ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
return ret;
}
static void uart_xmc4xxx_dma_rx_cb(const struct device *dma_dev, void *user_data, uint32_t channel,
int status)
{
const struct device *dev_uart = user_data;
struct uart_xmc4xxx_data *data = dev_uart->data;
unsigned int key;
int ret;
if (status != 0) {
return;
}
__ASSERT_NO_MSG(channel == data->dma_rx.dma_channel);
key = irq_lock();
k_work_cancel_delayable(&data->dma_rx.timeout_work);
if (data->dma_rx.buffer_len == 0) {
goto done;
}
data->dma_rx.counter = data->dma_rx.buffer_len;
async_evt_rx_rdy(data);
async_evt_rx_release_buffer(data, CURRENT_BUFFER);
if (!data->rx_next_buffer) {
dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
uart_xmc4xxx_irq_rx_disable(dev_uart);
async_evt_rx_disabled(data);
goto done;
}
data->dma_rx.buffer = data->rx_next_buffer;
data->dma_rx.buffer_len = data->rx_next_buffer_len;
data->dma_rx.offset = 0;
data->dma_rx.counter = 0;
data->rx_next_buffer = NULL;
data->rx_next_buffer_len = 0;
ret = dma_reload(data->dma_rx.dma_dev, data->dma_rx.dma_channel,
data->dma_rx.blk_cfg.source_address, (uint32_t)data->dma_rx.buffer,
data->dma_rx.buffer_len);
if (ret < 0) {
dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
uart_xmc4xxx_irq_rx_disable(dev_uart);
async_evt_rx_release_buffer(data, CURRENT_BUFFER);
async_evt_rx_disabled(data);
goto done;
}
dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
async_evt_rx_buf_request(data);
async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout);
done:
irq_unlock(key);
}
static int uart_xmc4xxx_async_rx_disable(const struct device *dev)
{
struct uart_xmc4xxx_data *data = dev->data;
struct dma_status stat;
unsigned int key;
k_work_cancel_delayable(&data->dma_rx.timeout_work);
key = irq_lock();
if (data->dma_rx.buffer_len == 0) {
__ASSERT_NO_MSG(data->dma_rx.buffer == NULL);
irq_unlock(key);
return -EINVAL;
}
dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel);
uart_xmc4xxx_irq_rx_disable(dev);
if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) {
size_t rx_rcv_len = data->dma_rx.buffer_len - stat.pending_length;
if (rx_rcv_len > data->dma_rx.offset) {
data->dma_rx.counter = rx_rcv_len;
async_evt_rx_rdy(data);
}
}
async_evt_rx_release_buffer(data, CURRENT_BUFFER);
async_evt_rx_release_buffer(data, NEXT_BUFFER);
async_evt_rx_disabled(data);
irq_unlock(key);
return 0;
}
static void uart_xmc4xxx_dma_tx_cb(const struct device *dma_dev, void *user_data, uint32_t channel,
int status)
{
const struct device *dev_uart = user_data;
struct uart_xmc4xxx_data *data = dev_uart->data;
size_t tx_buffer_len = data->dma_tx.buffer_len;
struct dma_status stat;
if (status != 0) {
return;
}
__ASSERT_NO_MSG(channel == data->dma_tx.dma_channel);
k_work_cancel_delayable(&data->dma_tx.timeout_work);
if (tx_buffer_len == 0) {
return;
}
if (!dma_get_status(data->dma_tx.dma_dev, channel, &stat)) {
data->dma_tx.counter = tx_buffer_len - stat.pending_length;
}
async_evt_tx_done(data);
/* if the callback doesn't do a chained uart_tx write, then stop the dma */
if (data->dma_tx.buffer == NULL) {
dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel);
disable_tx_events(dev_uart->config);
}
}
static int uart_xmc4xxx_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
struct uart_xmc4xxx_data *data = dev->data;
unsigned int key;
int ret = 0;
key = irq_lock();
if (data->dma_rx.buffer_len == 0U) {
ret = -EACCES;
goto done;
}
if (data->rx_next_buffer_len != 0U) {
ret = -EBUSY;
goto done;
}
data->rx_next_buffer = buf;
data->rx_next_buffer_len = len;
done:
irq_unlock(key);
return ret;
}
#endif
static int uart_xmc4xxx_init(const struct device *dev)
{
int ret;
@@ -338,12 +889,16 @@ static int uart_xmc4xxx_init(const struct device *dev)
XMC_UART_CH_SetInputSource(config->uart, XMC_UART_CH_INPUT_RXD,
config->input_src);
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
config->irq_config_func(dev);
uart_xmc4xxx_configure_service_requests(dev);
#endif
#if defined(CONFIG_UART_ASYNC_API)
ret = uart_xmc4xxx_async_init(dev);
#endif
return ret;
}
static const struct uart_driver_api uart_xmc4xxx_driver_api = {
@@ -361,9 +916,41 @@ static const struct uart_driver_api uart_xmc4xxx_driver_api = {
.irq_callback_set = uart_xmc4xxx_irq_callback_set,
.irq_is_pending = uart_xmc4xxx_irq_is_pending,
#endif
#if defined(CONFIG_UART_ASYNC_API)
.callback_set = uart_xmc4xxx_async_callback_set,
.tx = uart_xmc4xxx_async_tx,
.tx_abort = uart_xmc4xxx_async_tx_abort,
.rx_enable = uart_xmc4xxx_async_rx_enable,
.rx_buf_rsp = uart_xmc4xxx_rx_buf_rsp,
.rx_disable = uart_xmc4xxx_async_rx_disable,
#endif
};
#ifdef CONFIG_UART_ASYNC_API
#define UART_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst) \
.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)), \
.dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \
.dma_cfg = { \
.dma_slot = DT_INST_DMAS_CELL_BY_NAME(index, dir, config), \
.channel_direction = ch_dir, \
.channel_priority = DT_INST_DMAS_CELL_BY_NAME(index, dir, priority), \
.source_data_size = 1, \
.dest_data_size = 1, \
.source_burst_length = src_burst, \
.dest_burst_length = dst_burst, \
.block_count = 1, \
.dma_callback = uart_xmc4xxx_dma_##dir##_cb, \
},
#define UART_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst) \
.dma_##dir = {COND_CODE_1( \
DT_INST_DMAS_HAS_NAME(index, dir), \
(UART_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst)), (NULL))},
#else
#define UART_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst)
#endif
#if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API)
#define XMC4XXX_IRQ_HANDLER(index) \
static void uart_xmc4xxx_irq_setup_##index(const struct device *dev) \
{ \
@@ -391,7 +978,9 @@ static void uart_xmc4xxx_irq_setup_##index(const struct device *dev)
PINCTRL_DT_INST_DEFINE(index); \
XMC4XXX_IRQ_HANDLER(index) \
static struct uart_xmc4xxx_data xmc4xxx_data_##index = { \
.config.baudrate = DT_INST_PROP(index, current_speed), \
UART_DMA_CHANNEL(index, tx, MEMORY_TO_PERIPHERAL, 8, 1) \
UART_DMA_CHANNEL(index, rx, PERIPHERAL_TO_MEMORY, 1, 8) \
}; \
\
static const struct uart_xmc4xxx_config xmc4xxx_config_##index = { \


@@ -46,7 +46,8 @@ properties:
fifo-tx-size:
description: |
Fifo size used for buffering transmit bytes. A value of 0 implies that
the fifo is not used while transmitting. If the UART is used in async mode
then fifo-tx-size should be set to 0.
required: true
type: int
enum:
@@ -61,7 +62,8 @@ properties:
fifo-rx-size:
description: |
Fifo size used for buffering received bytes. A value of 0 implies that
the fifo is not used while receiving. If the UART is used in async mode
then fifo-rx-size should be set to 0.
required: true
type: int
enum:
@@ -80,3 +82,37 @@ properties:
USIC0 = [84, 89]
USIC1 = [90, 95]
USIC2 = [96, 101]
dmas:
description: |
Optional TX & RX dma specifiers used by async UART.
The dmas are referenced in the UART node using the following syntax:
dmas = <&dma1 1 0 XMC4XXX_SET_CONFIG(10,6)>, <&dma1 2 0 XMC4XXX_SET_CONFIG(11,6)>;
where the first entry is for the TX, and the second for RX.
The parameters in the dma entry are: dma device phandle, dma channel, dma priority (0 is
lowest and 7 is highest), and an opaque entry for the dma line routing parameters set
by the macro XMC4XXX_SET_CONFIG(line, request_source). Use the following steps to
properly select the line and request_source parameters:
1. Select a dma device and a free dma channel.
2. Select a free dma line. The dma0 device can only connect to lines [0, 7] and
dma1 can connect to lines [8, 11].
3. For a given interrupt, calculate the service request (SR) number. Note the following
simple mapping: in USIC0, interrupt 84->SR0, interrupt 85->SR1, ... etc.
In USIC1, interrupt 90->SR0, 91->SR1, etc.
4. Select request_source from Table "DMA Request Source Selection" in the XMC4XXX
reference manual.
For example, say we select interrupt 85 on USIC0, dma0, channel 3, priority 4, and line 7.
The interrupt would map to SR1. From Table "DMA Request Source Selection", request_source
would need to be set to 10 and the dts entry would be:
dmas = <&dma0 3 4 XMC4XXX_SET_CONFIG(7,10) ... >;
dma-names:
description: |
Required if the dmas property exists. Should be set to "tx" and "rx"
to match the dmas property.
For example
dma-names = "tx", "rx";