/*
 * Copyright (c) 2017 Google LLC.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT atmel_sam0_uart

#include <device.h>
#include <errno.h>
#include <init.h>
#include <sys/__assert.h>
#include <soc.h>
#include <drivers/uart.h>
#include <drivers/dma.h>
#include <string.h>

#ifndef SERCOM_USART_CTRLA_MODE_USART_INT_CLK
#define SERCOM_USART_CTRLA_MODE_USART_INT_CLK SERCOM_USART_CTRLA_MODE(0x1)
#endif

/* Device constant configuration parameters */
struct uart_sam0_dev_cfg {
	SercomUsart *regs;
	uint32_t baudrate;
	uint32_t pads;
#ifdef MCLK
	volatile uint32_t *mclk;
	uint32_t mclk_mask;
	uint16_t gclk_core_id;
#else
	uint32_t pm_apbcmask;
	uint16_t gclk_clkctrl_id;
#endif
#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API
	void (*irq_config_func)(const struct device *dev);
#endif
#if CONFIG_UART_ASYNC_API
	char *dma_dev;
	uint8_t tx_dma_request;
	uint8_t tx_dma_channel;
	uint8_t rx_dma_request;
	uint8_t rx_dma_channel;
#endif
};

/* Device run time data */
struct uart_sam0_dev_data {
	struct uart_config config_cache;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t cb;
	void *cb_data;
#endif
#if CONFIG_UART_ASYNC_API
	const struct device *dev;
	const struct uart_sam0_dev_cfg *cfg;
	const struct device *dma;

	uart_callback_t async_cb;
	void *async_cb_data;

	struct k_delayed_work tx_timeout_work;
	const uint8_t *tx_buf;
	size_t tx_len;

	struct k_delayed_work rx_timeout_work;
	size_t rx_timeout_time;
	size_t rx_timeout_chunk;
	uint32_t rx_timeout_start;
	uint8_t *rx_buf;
	size_t rx_len;
	size_t rx_processed_len;
	uint8_t *rx_next_buf;
	size_t rx_next_len;
	bool rx_waiting_for_irq;
	bool rx_timeout_from_isr;
#endif
};

#define DEV_CFG(dev) \
	((const struct uart_sam0_dev_cfg *const)(dev)->config)
#define DEV_DATA(dev) ((struct uart_sam0_dev_data * const)(dev)->data)
static void wait_synchronization(SercomUsart *const usart)
{
#if defined(SERCOM_USART_SYNCBUSY_MASK)
	/* SYNCBUSY is a register */
	while ((usart->SYNCBUSY.reg & SERCOM_USART_SYNCBUSY_MASK) != 0) {
	}
#elif defined(SERCOM_USART_STATUS_SYNCBUSY)
	/* SYNCBUSY is a bit */
	while ((usart->STATUS.reg & SERCOM_USART_STATUS_SYNCBUSY) != 0) {
	}
#else
#error Unsupported device
#endif
}

static int uart_sam0_set_baudrate(SercomUsart *const usart, uint32_t baudrate,
				  uint32_t clk_freq_hz)
{
	uint64_t tmp;
	uint16_t baud;

	tmp = (uint64_t)baudrate << 20;
	tmp = (tmp + (clk_freq_hz >> 1)) / clk_freq_hz;

	/* Verify that the calculated result is within range */
	if (tmp < 1 || tmp > UINT16_MAX) {
		return -ERANGE;
	}

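	/*
	 * This implements the datasheet's arithmetic-mode formula,
	 * BAUD = 65536 * (1 - 16 * f_baud / f_ref), in fixed point
	 * (16 * 65536 = 2^20, hence the shift above).  A worked
	 * example, assuming a 48 MHz clock and 115200 baud:
	 *
	 *	tmp  = (115200 << 20 + 24000000) / 48000000 = 2517
	 *	BAUD = 65536 - 2517 = 63019
	 */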
	baud = 65536 - (uint16_t)tmp;
	usart->BAUD.reg = baud;
	wait_synchronization(usart);

	return 0;
}

#if CONFIG_UART_ASYNC_API

static void uart_sam0_dma_tx_done(const struct device *dma_dev, void *arg,
				  uint32_t id, int error_code)
{
	ARG_UNUSED(dma_dev);
	ARG_UNUSED(id);
	ARG_UNUSED(error_code);

	struct uart_sam0_dev_data *const dev_data =
		(struct uart_sam0_dev_data *const)arg;
	const struct device *dev = dev_data->dev;

	k_delayed_work_cancel(&dev_data->tx_timeout_work);

	int key = irq_lock();

	struct uart_event evt = {
		.type = UART_TX_DONE,
		.data.tx = {
			.buf = dev_data->tx_buf,
			.len = dev_data->tx_len,
		},
	};

	dev_data->tx_buf = NULL;
	dev_data->tx_len = 0U;

	if (evt.data.tx.len != 0U && dev_data->async_cb) {
		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	irq_unlock(key);
}

static int uart_sam0_tx_halt(struct uart_sam0_dev_data *dev_data)
{
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;
	int key = irq_lock();
	size_t tx_active = dev_data->tx_len;
	struct dma_status st;

	struct uart_event evt = {
		.type = UART_TX_ABORTED,
		.data.tx = {
			.buf = dev_data->tx_buf,
			.len = 0U,
		},
	};

	dev_data->tx_buf = NULL;
	dev_data->tx_len = 0U;

	dma_stop(dev_data->dma, cfg->tx_dma_channel);

	irq_unlock(key);

	if (dma_get_status(dev_data->dma, cfg->tx_dma_channel, &st) == 0) {
		evt.data.tx.len = tx_active - st.pending_length;
	}

	if (tx_active) {
		if (dev_data->async_cb) {
			dev_data->async_cb(dev_data->dev,
					   &evt, dev_data->async_cb_data);
		}
	} else {
		return -EINVAL;
	}

	return 0;
}
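
/*
 * Note on the accounting above: dma_status.pending_length is what the
 * DMA had not yet transferred, so UART_TX_ABORTED reports
 * tx_active - pending_length, i.e. the bytes already handed to the
 * SERCOM when the channel was stopped.
 */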

static void uart_sam0_tx_timeout(struct k_work *work)
{
	struct uart_sam0_dev_data *dev_data = CONTAINER_OF(work,
		struct uart_sam0_dev_data, tx_timeout_work);

	uart_sam0_tx_halt(dev_data);
}

static void uart_sam0_notify_rx_processed(struct uart_sam0_dev_data *dev_data,
					  size_t processed)
{
	if (!dev_data->async_cb) {
		return;
	}

	if (dev_data->rx_processed_len == processed) {
		return;
	}

	struct uart_event evt = {
		.type = UART_RX_RDY,
		.data.rx = {
			.buf = dev_data->rx_buf,
			.offset = dev_data->rx_processed_len,
			.len = processed - dev_data->rx_processed_len,
		},
	};

	dev_data->rx_processed_len = processed;

	dev_data->async_cb(dev_data->dev,
			   &evt, dev_data->async_cb_data);
}

static void uart_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
				  uint32_t id, int error_code)
{
	ARG_UNUSED(dma_dev);
	ARG_UNUSED(id);
	ARG_UNUSED(error_code);

	struct uart_sam0_dev_data *const dev_data =
		(struct uart_sam0_dev_data *const)arg;
	const struct device *dev = dev_data->dev;
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;
	SercomUsart * const regs = cfg->regs;
	int key = irq_lock();

	if (dev_data->rx_len == 0U) {
		irq_unlock(key);
		return;
	}

	uart_sam0_notify_rx_processed(dev_data, dev_data->rx_len);

	if (dev_data->async_cb) {
		struct uart_event evt = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf = {
				.buf = dev_data->rx_buf,
			},
		};

		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	/* No next buffer, so end the transfer */
	if (!dev_data->rx_next_len) {
		dev_data->rx_buf = NULL;
		dev_data->rx_len = 0U;

		if (dev_data->async_cb) {
			struct uart_event evt = {
				.type = UART_RX_DISABLED,
			};

			dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
		}

		irq_unlock(key);
		return;
	}

	dev_data->rx_buf = dev_data->rx_next_buf;
	dev_data->rx_len = dev_data->rx_next_len;
	dev_data->rx_next_buf = NULL;
	dev_data->rx_next_len = 0U;
	dev_data->rx_processed_len = 0U;

	dma_reload(dev_data->dma, cfg->rx_dma_channel,
		   (uint32_t)(&(regs->DATA.reg)),
		   (uint32_t)dev_data->rx_buf, dev_data->rx_len);

	/*
	 * If there should be a timeout, handle starting the DMA in the
	 * ISR, since reception resets it and DMA completion implies
	 * reception.  This also catches the case of DMA completion during
	 * timeout handling.
	 */
	if (dev_data->rx_timeout_time != SYS_FOREVER_MS) {
		dev_data->rx_waiting_for_irq = true;
		regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;
		irq_unlock(key);
		return;
	}

	/* Otherwise, start the transfer immediately. */
	dma_start(dev_data->dma, cfg->rx_dma_channel);

	struct uart_event evt = {
		.type = UART_RX_BUF_REQUEST,
	};

	dev_data->async_cb(dev, &evt, dev_data->async_cb_data);

	irq_unlock(key);
}

static void uart_sam0_rx_timeout(struct k_work *work)
{
	struct uart_sam0_dev_data *dev_data = CONTAINER_OF(work,
		struct uart_sam0_dev_data, rx_timeout_work);
	const struct uart_sam0_dev_cfg *const cfg = dev_data->cfg;
	SercomUsart * const regs = cfg->regs;
	struct dma_status st;
	int key = irq_lock();

	if (dev_data->rx_len == 0U) {
		irq_unlock(key);
		return;
	}

	/*
	 * Stop the DMA transfer and restart the interrupt read
	 * component (so the timeout restarts if there's still data).
	 * However, just ignore it if the transfer has completed (nothing
	 * pending): that means the DMA ISR is already pending, so just
	 * let it handle things instead when we re-enable IRQs.
	 */
	dma_stop(dev_data->dma, cfg->rx_dma_channel);
	if (dma_get_status(dev_data->dma, cfg->rx_dma_channel,
			   &st) == 0 && st.pending_length == 0U) {
		irq_unlock(key);
		return;
	}

	uint8_t *rx_dma_start = dev_data->rx_buf + dev_data->rx_len -
				st.pending_length;
	size_t rx_processed = rx_dma_start - dev_data->rx_buf;

	/*
	 * We know we still have space, since the above will catch the
	 * empty buffer, so always restart the transfer.
	 */
	dma_reload(dev_data->dma, cfg->rx_dma_channel,
		   (uint32_t)(&(regs->DATA.reg)),
		   (uint32_t)rx_dma_start,
		   dev_data->rx_len - rx_processed);

	dev_data->rx_waiting_for_irq = true;
	regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;

	/*
	 * Never do a notify on a timeout started from the ISR: timing
	 * granularity means the first timeout can be in the middle
	 * of reception but still have the total elapsed time exhausted.
	 * So we require a timeout chunk with no data seen at all
	 * (i.e. no ISR entry).
	 */
	if (dev_data->rx_timeout_from_isr) {
		dev_data->rx_timeout_from_isr = false;
		k_delayed_work_submit(&dev_data->rx_timeout_work,
				      K_MSEC(dev_data->rx_timeout_chunk));
		irq_unlock(key);
		return;
	}

	uint32_t now = k_uptime_get_32();
	uint32_t elapsed = now - dev_data->rx_timeout_start;

	if (elapsed >= dev_data->rx_timeout_time) {
		/*
		 * No time left, so call the handler, and let the ISR
		 * restart the timeout when it sees data.
		 */
		uart_sam0_notify_rx_processed(dev_data, rx_processed);
	} else {
		/*
		 * Still have time left, so start another timeout.
		 */
		uint32_t remaining = MIN(dev_data->rx_timeout_time - elapsed,
					 dev_data->rx_timeout_chunk);

		k_delayed_work_submit(&dev_data->rx_timeout_work,
				      K_MSEC(remaining));
	}

	irq_unlock(key);
}

#endif

static int uart_sam0_configure(const struct device *dev,
			       const struct uart_config *new_cfg)
{
	int retval;

	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);
	SercomUsart * const usart = cfg->regs;

	wait_synchronization(usart);

	usart->CTRLA.bit.ENABLE = 0;
	wait_synchronization(usart);

	if (new_cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) {
		/* Flow control not yet supported though in principle possible
		 * on this soc family.
		 */
		return -ENOTSUP;
	}

	dev_data->config_cache.flow_ctrl = new_cfg->flow_ctrl;

	SERCOM_USART_CTRLA_Type CTRLA_temp = usart->CTRLA;
	SERCOM_USART_CTRLB_Type CTRLB_temp = usart->CTRLB;

	switch (new_cfg->parity) {
	case UART_CFG_PARITY_NONE:
		CTRLA_temp.bit.FORM = 0x0;
		break;
	case UART_CFG_PARITY_ODD:
		CTRLA_temp.bit.FORM = 0x1;
		CTRLB_temp.bit.PMODE = 1;
		break;
	case UART_CFG_PARITY_EVEN:
		CTRLA_temp.bit.FORM = 0x1;
		CTRLB_temp.bit.PMODE = 0;
		break;
	default:
		return -ENOTSUP;
	}

	dev_data->config_cache.parity = new_cfg->parity;

	switch (new_cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		CTRLB_temp.bit.SBMODE = 0;
		break;
	case UART_CFG_STOP_BITS_2:
		CTRLB_temp.bit.SBMODE = 1;
		break;
	default:
		return -ENOTSUP;
	}

	dev_data->config_cache.stop_bits = new_cfg->stop_bits;

	switch (new_cfg->data_bits) {
	case UART_CFG_DATA_BITS_5:
		CTRLB_temp.bit.CHSIZE = 0x5;
		break;
	case UART_CFG_DATA_BITS_6:
		CTRLB_temp.bit.CHSIZE = 0x6;
		break;
	case UART_CFG_DATA_BITS_7:
		CTRLB_temp.bit.CHSIZE = 0x7;
		break;
	case UART_CFG_DATA_BITS_8:
		CTRLB_temp.bit.CHSIZE = 0x0;
		break;
	case UART_CFG_DATA_BITS_9:
		CTRLB_temp.bit.CHSIZE = 0x1;
		break;
	default:
		return -ENOTSUP;
	}

	dev_data->config_cache.data_bits = new_cfg->data_bits;

	usart->CTRLA = CTRLA_temp;
	wait_synchronization(usart);

	usart->CTRLB = CTRLB_temp;
	wait_synchronization(usart);

	retval = uart_sam0_set_baudrate(usart, new_cfg->baudrate,
					SOC_ATMEL_SAM0_GCLK0_FREQ_HZ);
	if (retval != 0) {
		return retval;
	}

	dev_data->config_cache.baudrate = new_cfg->baudrate;

	usart->CTRLA.bit.ENABLE = 1;
	wait_synchronization(usart);

	return 0;
}
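
/*
 * A minimal application-side sketch of reconfiguring the port through
 * the generic UART API (values are illustrative, uart_dev is a
 * placeholder):
 *
 *	const struct uart_config uart_cfg = {
 *		.baudrate = 115200,
 *		.parity = UART_CFG_PARITY_NONE,
 *		.stop_bits = UART_CFG_STOP_BITS_1,
 *		.data_bits = UART_CFG_DATA_BITS_8,
 *		.flow_ctrl = UART_CFG_FLOW_CTRL_NONE,
 *	};
 *
 *	int err = uart_configure(uart_dev, &uart_cfg);
 */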

static int uart_sam0_config_get(const struct device *dev,
				struct uart_config *out_cfg)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);

	memcpy(out_cfg, &(dev_data->config_cache),
	       sizeof(dev_data->config_cache));

	return 0;
}

static int uart_sam0_init(const struct device *dev)
{
	int retval;
	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);

	SercomUsart *const usart = cfg->regs;

#ifdef MCLK
	/* Enable the GCLK */
	GCLK->PCHCTRL[cfg->gclk_core_id].reg = GCLK_PCHCTRL_GEN_GCLK0 |
					       GCLK_PCHCTRL_CHEN;

	/* Enable SERCOM clock in MCLK */
	*cfg->mclk |= cfg->mclk_mask;
#else
	/* Enable the GCLK */
	GCLK->CLKCTRL.reg = cfg->gclk_clkctrl_id | GCLK_CLKCTRL_GEN_GCLK0 |
			    GCLK_CLKCTRL_CLKEN;

	/* Enable SERCOM clock in PM */
	PM->APBCMASK.reg |= cfg->pm_apbcmask;
#endif

	/* Disable all USART interrupts */
	usart->INTENCLR.reg = SERCOM_USART_INTENCLR_MASK;
	wait_synchronization(usart);

	/* 8 bits of data, no parity, 1 stop bit in normal mode */
	usart->CTRLA.reg =
		cfg->pads
		/* Internal clock */
		| SERCOM_USART_CTRLA_MODE_USART_INT_CLK
#if defined(SERCOM_USART_CTRLA_SAMPR)
		/* 16x oversampling with arithmetic baud rate generation */
		| SERCOM_USART_CTRLA_SAMPR(0)
#endif
		| SERCOM_USART_CTRLA_FORM(0) |
		SERCOM_USART_CTRLA_CPOL | SERCOM_USART_CTRLA_DORD;
	wait_synchronization(usart);

	dev_data->config_cache.flow_ctrl = UART_CFG_FLOW_CTRL_NONE;
	dev_data->config_cache.parity = UART_CFG_PARITY_NONE;
	dev_data->config_cache.stop_bits = UART_CFG_STOP_BITS_1;
	dev_data->config_cache.data_bits = UART_CFG_DATA_BITS_8;

	/* Enable receiver and transmitter */
	usart->CTRLB.reg = SERCOM_USART_CTRLB_CHSIZE(0) |
			   SERCOM_USART_CTRLB_RXEN | SERCOM_USART_CTRLB_TXEN;
	wait_synchronization(usart);

	retval = uart_sam0_set_baudrate(usart, cfg->baudrate,
					SOC_ATMEL_SAM0_GCLK0_FREQ_HZ);
	if (retval != 0) {
		return retval;
	}
	dev_data->config_cache.baudrate = cfg->baudrate;

#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API
	cfg->irq_config_func(dev);
#endif

#ifdef CONFIG_UART_ASYNC_API
	dev_data->dev = dev;
	dev_data->cfg = cfg;
	dev_data->dma = device_get_binding(cfg->dma_dev);

	k_delayed_work_init(&dev_data->tx_timeout_work, uart_sam0_tx_timeout);
	k_delayed_work_init(&dev_data->rx_timeout_work, uart_sam0_rx_timeout);

	if (cfg->tx_dma_channel != 0xFFU) {
		struct dma_config dma_cfg = { 0 };
		struct dma_block_config dma_blk = { 0 };

		if (!dev_data->dma) {
			return -ENOTSUP;
		}

		dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
		dma_cfg.source_data_size = 1;
		dma_cfg.dest_data_size = 1;
		dma_cfg.user_data = dev_data;
		dma_cfg.dma_callback = uart_sam0_dma_tx_done;
		dma_cfg.block_count = 1;
		dma_cfg.head_block = &dma_blk;
		dma_cfg.dma_slot = cfg->tx_dma_request;

		dma_blk.block_size = 1;
		dma_blk.dest_address = (uint32_t)(&(usart->DATA.reg));
		dma_blk.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

		retval = dma_config(dev_data->dma, cfg->tx_dma_channel,
				    &dma_cfg);
		if (retval != 0) {
			return retval;
		}
	}

	if (cfg->rx_dma_channel != 0xFFU) {
		struct dma_config dma_cfg = { 0 };
		struct dma_block_config dma_blk = { 0 };

		if (!dev_data->dma) {
			return -ENOTSUP;
		}

		dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY;
		dma_cfg.source_data_size = 1;
		dma_cfg.dest_data_size = 1;
		dma_cfg.user_data = dev_data;
		dma_cfg.dma_callback = uart_sam0_dma_rx_done;
		dma_cfg.block_count = 1;
		dma_cfg.head_block = &dma_blk;
		dma_cfg.dma_slot = cfg->rx_dma_request;

		dma_blk.block_size = 1;
		dma_blk.source_address = (uint32_t)(&(usart->DATA.reg));
		dma_blk.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

		retval = dma_config(dev_data->dma, cfg->rx_dma_channel,
				    &dma_cfg);
		if (retval != 0) {
			return retval;
		}
	}

#endif

	usart->CTRLA.bit.ENABLE = 1;
	wait_synchronization(usart);

	return 0;
}

static int uart_sam0_poll_in(const struct device *dev, unsigned char *c)
{
	SercomUsart *const usart = DEV_CFG(dev)->regs;

	if (!usart->INTFLAG.bit.RXC) {
		return -EBUSY;
	}

	*c = (unsigned char)usart->DATA.reg;
	return 0;
}

static void uart_sam0_poll_out(const struct device *dev, unsigned char c)
{
	SercomUsart *const usart = DEV_CFG(dev)->regs;

	while (!usart->INTFLAG.bit.DRE) {
	}

	/* send a character */
	usart->DATA.reg = c;
}

#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API

static void uart_sam0_isr(const struct device *dev)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);

#if CONFIG_UART_INTERRUPT_DRIVEN
	if (dev_data->cb) {
		dev_data->cb(dev, dev_data->cb_data);
	}
#endif

#if CONFIG_UART_ASYNC_API
	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);
	SercomUsart * const regs = cfg->regs;

	if (dev_data->rx_len && regs->INTFLAG.bit.RXC &&
	    dev_data->rx_waiting_for_irq) {
		dev_data->rx_waiting_for_irq = false;
		regs->INTENCLR.reg = SERCOM_USART_INTENCLR_RXC;

		/* Receive started, so request the next buffer */
		if (dev_data->rx_next_len == 0U && dev_data->async_cb) {
			struct uart_event evt = {
				.type = UART_RX_BUF_REQUEST,
			};

			dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
		}

		/*
		 * If we have a timeout, restart the time remaining whenever
		 * we see data.
		 */
		if (dev_data->rx_timeout_time != SYS_FOREVER_MS) {
			dev_data->rx_timeout_from_isr = true;
			dev_data->rx_timeout_start = k_uptime_get_32();
			k_delayed_work_submit(&dev_data->rx_timeout_work,
					      K_MSEC(dev_data->rx_timeout_chunk));
		}

		/* DMA will read the currently ready byte out */
		dma_start(dev_data->dma, cfg->rx_dma_channel);
	}
#endif
}

#endif

#if CONFIG_UART_INTERRUPT_DRIVEN

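/*
 * The SERCOM USART exposes a single-entry DATA register rather than a
 * multi-byte hardware FIFO (at least as used by this driver), so the
 * fifo_fill()/fifo_read() pair below moves at most one byte per call.
 */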
static int uart_sam0_fifo_fill(const struct device *dev,
			       const uint8_t *tx_data, int len)
{
	SercomUsart *regs = DEV_CFG(dev)->regs;

	if (regs->INTFLAG.bit.DRE && len >= 1) {
		regs->DATA.reg = tx_data[0];
		return 1;
	} else {
		return 0;
	}
}

static void uart_sam0_irq_tx_enable(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	regs->INTENSET.reg = SERCOM_USART_INTENSET_DRE;
}

static void uart_sam0_irq_tx_disable(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_DRE;
}

static int uart_sam0_irq_tx_ready(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	return regs->INTFLAG.bit.DRE != 0;
}

static void uart_sam0_irq_rx_enable(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;
}

static void uart_sam0_irq_rx_disable(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_RXC;
}

static int uart_sam0_irq_rx_ready(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	return regs->INTFLAG.bit.RXC != 0;
}

static int uart_sam0_fifo_read(const struct device *dev, uint8_t *rx_data,
			       const int size)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	if (regs->INTFLAG.bit.RXC) {
		uint8_t ch = regs->DATA.reg;

		if (size >= 1) {
			*rx_data = ch;
			return 1;
		} else {
			return -EINVAL;
		}
	}
	return 0;
}

static int uart_sam0_irq_is_pending(const struct device *dev)
{
	SercomUsart *const regs = DEV_CFG(dev)->regs;

	return (regs->INTENSET.reg & regs->INTFLAG.reg) != 0;
}

static int uart_sam0_irq_update(const struct device *dev) { return 1; }

static void uart_sam0_irq_callback_set(const struct device *dev,
				       uart_irq_callback_user_data_t cb,
				       void *cb_data)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);

	dev_data->cb = cb;
	dev_data->cb_data = cb_data;
}
#endif

#ifdef CONFIG_UART_ASYNC_API

static int uart_sam0_callback_set(const struct device *dev,
				  uart_callback_t callback,
				  void *user_data)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);

	dev_data->async_cb = callback;
	dev_data->async_cb_data = user_data;

	return 0;
}

static int uart_sam0_tx(const struct device *dev, const uint8_t *buf,
			size_t len,
			int32_t timeout)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);
	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);
	SercomUsart *regs = DEV_CFG(dev)->regs;
	int retval;

	if (!dev_data->dma || cfg->tx_dma_channel == 0xFFU) {
		return -ENOTSUP;
	}

	if (len > 0xFFFFU) {
		return -EINVAL;
	}

	int key = irq_lock();

	if (dev_data->tx_len != 0U) {
		retval = -EBUSY;
		goto err;
	}

	dev_data->tx_buf = buf;
	dev_data->tx_len = len;

	irq_unlock(key);

	retval = dma_reload(dev_data->dma, cfg->tx_dma_channel, (uint32_t)buf,
			    (uint32_t)(&(regs->DATA.reg)), len);
	if (retval != 0U) {
		return retval;
	}

	if (timeout != SYS_FOREVER_MS) {
		k_delayed_work_submit(&dev_data->tx_timeout_work,
				      K_MSEC(timeout));
	}

	return dma_start(dev_data->dma, cfg->tx_dma_channel);
err:
	irq_unlock(key);
	return retval;
}
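
/*
 * Typical application-side use of the async transmit path (a sketch;
 * app_cb and tx_buf are placeholders):
 *
 *	uart_callback_set(uart_dev, app_cb, NULL);
 *	uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100);
 *
 * Completion is signalled through UART_TX_DONE, or UART_TX_ABORTED if
 * the 100 ms timeout expires first.
 */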

static int uart_sam0_tx_abort(const struct device *dev)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);
	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);

	if (!dev_data->dma || cfg->tx_dma_channel == 0xFFU) {
		return -ENOTSUP;
	}

	k_delayed_work_cancel(&dev_data->tx_timeout_work);

	return uart_sam0_tx_halt(dev_data);
}

static int uart_sam0_rx_enable(const struct device *dev, uint8_t *buf,
			       size_t len,
			       int32_t timeout)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);
	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);
	SercomUsart *regs = DEV_CFG(dev)->regs;
	int retval;

	if (!dev_data->dma || cfg->rx_dma_channel == 0xFFU) {
		return -ENOTSUP;
	}

	if (len > 0xFFFFU) {
		return -EINVAL;
	}

	int key = irq_lock();

	if (dev_data->rx_len != 0U) {
		retval = -EBUSY;
		goto err;
	}

	/* Read off anything that was already there */
	while (regs->INTFLAG.bit.RXC) {
		char discard = regs->DATA.reg;

		(void)discard;
	}

	retval = dma_reload(dev_data->dma, cfg->rx_dma_channel,
			    (uint32_t)(&(regs->DATA.reg)),
			    (uint32_t)buf, len);
	if (retval != 0) {
		return retval;
	}

	dev_data->rx_buf = buf;
	dev_data->rx_len = len;
	dev_data->rx_processed_len = 0U;
	dev_data->rx_waiting_for_irq = true;
	dev_data->rx_timeout_from_isr = true;
	dev_data->rx_timeout_time = timeout;
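	/*
	 * The timeout is serviced in chunks of a quarter of the
	 * requested time (at least 1 ms): only a chunk that elapses
	 * with no RXC interrupt at all counts toward the total; see
	 * uart_sam0_rx_timeout() above.
	 */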
	dev_data->rx_timeout_chunk = MAX(timeout / 4U, 1);

	regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;

	irq_unlock(key);
	return 0;

err:
	irq_unlock(key);
	return retval;
}

static int uart_sam0_rx_buf_rsp(const struct device *dev, uint8_t *buf,
				size_t len)
{
	if (len > 0xFFFFU) {
		return -EINVAL;
	}

	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);
	int key = irq_lock();
	int retval = 0;

	if (dev_data->rx_len == 0U) {
		retval = -EACCES;
		goto err;
	}

	if (dev_data->rx_next_len != 0U) {
		retval = -EBUSY;
		goto err;
	}

	dev_data->rx_next_buf = buf;
	dev_data->rx_next_len = len;

	irq_unlock(key);
	return 0;

err:
	irq_unlock(key);
	return retval;
}

static int uart_sam0_rx_disable(const struct device *dev)
{
	struct uart_sam0_dev_data *const dev_data = DEV_DATA(dev);
	const struct uart_sam0_dev_cfg *const cfg = DEV_CFG(dev);
	SercomUsart *const regs = cfg->regs;
	struct dma_status st;

	k_delayed_work_cancel(&dev_data->rx_timeout_work);

	int key = irq_lock();

	if (dev_data->rx_len == 0U) {
		irq_unlock(key);
		return -EINVAL;
	}

	regs->INTENCLR.reg = SERCOM_USART_INTENCLR_RXC;
	dma_stop(dev_data->dma, cfg->rx_dma_channel);

	if (dma_get_status(dev_data->dma, cfg->rx_dma_channel,
			   &st) == 0 && st.pending_length != 0U) {
		size_t rx_processed = dev_data->rx_len - st.pending_length;

		uart_sam0_notify_rx_processed(dev_data, rx_processed);
	}

	struct uart_event evt = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf = {
			.buf = dev_data->rx_buf,
		},
	};

	dev_data->rx_buf = NULL;
	dev_data->rx_len = 0U;

	if (dev_data->async_cb) {
		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	if (dev_data->rx_next_len) {
		struct uart_event evt = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf = {
				.buf = dev_data->rx_next_buf,
			},
		};

		dev_data->rx_next_buf = NULL;
		dev_data->rx_next_len = 0U;

		if (dev_data->async_cb) {
			dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
		}
	}

	evt.type = UART_RX_DISABLED;
	if (dev_data->async_cb) {
		dev_data->async_cb(dev, &evt, dev_data->async_cb_data);
	}

	irq_unlock(key);

	return 0;
}

#endif

static const struct uart_driver_api uart_sam0_driver_api = {
	.poll_in = uart_sam0_poll_in,
	.poll_out = uart_sam0_poll_out,
	.configure = uart_sam0_configure,
	.config_get = uart_sam0_config_get,
#if CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_sam0_fifo_fill,
	.fifo_read = uart_sam0_fifo_read,
	.irq_tx_enable = uart_sam0_irq_tx_enable,
	.irq_tx_disable = uart_sam0_irq_tx_disable,
	.irq_tx_ready = uart_sam0_irq_tx_ready,
	.irq_rx_enable = uart_sam0_irq_rx_enable,
	.irq_rx_disable = uart_sam0_irq_rx_disable,
	.irq_rx_ready = uart_sam0_irq_rx_ready,
	.irq_is_pending = uart_sam0_irq_is_pending,
	.irq_update = uart_sam0_irq_update,
	.irq_callback_set = uart_sam0_irq_callback_set,
#endif
#if CONFIG_UART_ASYNC_API
	.callback_set = uart_sam0_callback_set,
	.tx = uart_sam0_tx,
	.tx_abort = uart_sam0_tx_abort,
	.rx_enable = uart_sam0_rx_enable,
	.rx_buf_rsp = uart_sam0_rx_buf_rsp,
	.rx_disable = uart_sam0_rx_disable,
#endif
};

#if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API

#define SAM0_UART_IRQ_CONNECT(n, m)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, m, irq),		\
			    DT_INST_IRQ_BY_IDX(n, m, priority),		\
			    uart_sam0_isr,				\
			    DEVICE_DT_INST_GET(n), 0);			\
		irq_enable(DT_INST_IRQ_BY_IDX(n, m, irq));		\
	} while (0)

#define UART_SAM0_IRQ_HANDLER_DECL(n)					\
	static void uart_sam0_irq_config_##n(const struct device *dev)
#define UART_SAM0_IRQ_HANDLER_FUNC(n)					\
	.irq_config_func = uart_sam0_irq_config_##n,

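/*
 * On SAM0 parts whose SERCOMs expose one NVIC line per interrupt
 * source (e.g. SAMD5x-class devices), the devicetree carries four IRQs
 * per instance and all of them are routed to the common ISR.
 */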
#if DT_INST_IRQ_HAS_IDX(0, 3)
#define UART_SAM0_IRQ_HANDLER(n)					\
static void uart_sam0_irq_config_##n(const struct device *dev)		\
{									\
	SAM0_UART_IRQ_CONNECT(n, 0);					\
	SAM0_UART_IRQ_CONNECT(n, 1);					\
	SAM0_UART_IRQ_CONNECT(n, 2);					\
	SAM0_UART_IRQ_CONNECT(n, 3);					\
}
#else
#define UART_SAM0_IRQ_HANDLER(n)					\
static void uart_sam0_irq_config_##n(const struct device *dev)		\
{									\
	SAM0_UART_IRQ_CONNECT(n, 0);					\
}
#endif
#else
#define UART_SAM0_IRQ_HANDLER_DECL(n)
#define UART_SAM0_IRQ_HANDLER_FUNC(n)
#define UART_SAM0_IRQ_HANDLER(n)
#endif

#if CONFIG_UART_ASYNC_API
#define UART_SAM0_DMA_CHANNELS(n)					\
	.dma_dev = ATMEL_SAM0_DT_INST_DMA_NAME(n, tx),			\
	.tx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, tx),	\
	.tx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, tx),	\
	.rx_dma_request = ATMEL_SAM0_DT_INST_DMA_TRIGSRC(n, rx),	\
	.rx_dma_channel = ATMEL_SAM0_DT_INST_DMA_CHANNEL(n, rx),
#else
#define UART_SAM0_DMA_CHANNELS(n)
#endif

#define UART_SAM0_SERCOM_PADS(n)					\
	(DT_INST_PROP(n, rxpo) << SERCOM_USART_CTRLA_RXPO_Pos) |	\
	(DT_INST_PROP(n, txpo) << SERCOM_USART_CTRLA_TXPO_Pos)

#ifdef MCLK
#define UART_SAM0_CONFIG_DEFN(n)					\
static const struct uart_sam0_dev_cfg uart_sam0_config_##n = {		\
	.regs = (SercomUsart *)DT_INST_REG_ADDR(n),			\
	.baudrate = DT_INST_PROP(n, current_speed),			\
	.mclk = (volatile uint32_t *)MCLK_MASK_DT_INT_REG_ADDR(n),	\
	.mclk_mask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, mclk, bit)),	\
	.gclk_core_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, periph_ch),\
	.pads = UART_SAM0_SERCOM_PADS(n),				\
	UART_SAM0_IRQ_HANDLER_FUNC(n)					\
	UART_SAM0_DMA_CHANNELS(n)					\
}
#else
#define UART_SAM0_CONFIG_DEFN(n)					\
static const struct uart_sam0_dev_cfg uart_sam0_config_##n = {		\
	.regs = (SercomUsart *)DT_INST_REG_ADDR(n),			\
	.baudrate = DT_INST_PROP(n, current_speed),			\
	.pm_apbcmask = BIT(DT_INST_CLOCKS_CELL_BY_NAME(n, pm, bit)),	\
	.gclk_clkctrl_id = DT_INST_CLOCKS_CELL_BY_NAME(n, gclk, clkctrl_id),\
	.pads = UART_SAM0_SERCOM_PADS(n),				\
	UART_SAM0_IRQ_HANDLER_FUNC(n)					\
	UART_SAM0_DMA_CHANNELS(n)					\
}
#endif

#define UART_SAM0_DEVICE_INIT(n)					\
static struct uart_sam0_dev_data uart_sam0_data_##n;			\
UART_SAM0_IRQ_HANDLER_DECL(n);						\
UART_SAM0_CONFIG_DEFN(n);						\
DEVICE_DT_INST_DEFINE(n, uart_sam0_init, device_pm_control_nop,		\
		    &uart_sam0_data_##n,				\
		    &uart_sam0_config_##n, PRE_KERNEL_1,		\
		    CONFIG_KERNEL_INIT_PRIORITY_DEVICE,			\
		    &uart_sam0_driver_api);				\
UART_SAM0_IRQ_HANDLER(n)

DT_INST_FOREACH_STATUS_OKAY(UART_SAM0_DEVICE_INIT)