spi: sam: Add RTIO support

Implements the SPI RTIO API. Refactors many of the internal transfer
functions to work on simple types so that the RTIO and existing SPI API
paths can coexist.
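The refactor follows one pattern throughout, condensed here from the diff
below: the fast-path helpers drop the struct spi_buf and device parameters
in favor of a plain pointer/length pair, so one helper serves both paths.

    /* Before: coupled to struct spi_buf and the device (for locking) */
    static void spi_sam_fast_tx(const struct device *dev, Spi *regs,
                                const struct spi_buf *tx_buf);

    /* After: plain pointer/length, callable from the classic
     * spi_transceive path and the RTIO submission path alike.
     */
    static void spi_sam_fast_tx(Spi *regs, const uint8_t *tx_buf,
                                const uint32_t tx_buf_len);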

Removes the unneeded shift_master specialization. With RTIO, either a
polling or a DMA transfer flow is used, as all operations are normalized.
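For illustration, a sketch of the per-operation dispatch that replaces it,
mirroring spi_sam_rx() in the diff below (spi_sam_tx and spi_sam_txrx
follow the same shape; the CONFIG_SPI_SAM_DMA guards are trimmed). The
threshold and helpers are the driver's own:

    static inline int spi_sam_rx(const struct device *dev, Spi *regs,
                                 uint8_t *rx_buf, uint32_t rx_buf_len)
    {
        const struct spi_sam_config *cfg = dev->config;

        /* Short transfers poll; long ones use DMA when available */
        if (rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
            k_spinlock_key_t key = spi_spin_lock(dev);

            spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
            spi_sam_finish(regs);
            spi_spin_unlock(dev, key);
            return 0;
        }

        return spi_sam_dma_txrx(dev, regs, NULL, rx_buf, rx_buf_len);
    }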

When SPI_RTIO is enabled, the blocking spi_transceive call translates
the request into an RTIO transaction placed on the device's queue of
pending transactions.
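A minimal sketch of that translation, condensed from spi_sam_transceive
in the diff below (locking and error paths trimmed):

    /* Copy the buffer sets into submission queue entries, submit,
     * then block on the single completion.
     */
    struct rtio_sqe *sqe = spi_rtio_copy(data->r, &data->iodev,
                                         tx_bufs, rx_bufs);
    if (sqe == NULL) {
        return -ENOMEM; /* submission queue too shallow */
    }

    rtio_submit(data->r, 1);
    struct rtio_cqe *cqe = rtio_cqe_consume(data->r);
    int err = cqe->result;
    rtio_cqe_release(data->r);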

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>

drivers/spi/Kconfig.sam

@@ -19,4 +19,19 @@ config SPI_SAM_DMA
help
Enable using DMA with SPI for SPI instances that enable dma channels in
their device tree node.
if SPI_RTIO
config SPI_SAM_RTIO_SQ_SIZE
int "Number of avialable submission queue entries"
default 8 # Sensible default that covers most common spi transactions
help
When RTIO is used with SPI, each driver holds a context which blocking
API calls use to perform SPI transactions. This queue needs to be as deep
as the longest spi_buf_set used when normal SPI operations are performed
(equal-length buffers). It may need to be slightly deeper when the
transmit/receive buffer sets are not matched equally in length, as these
are transformed into normal transceives.
endif # SPI_RTIO
endif # SPI_SAM
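
As a sizing illustration (hypothetical buffers, not part of this commit):
spi_rtio_copy() turns each buffer in the largest spi_buf_set into one
chained submission entry, so a three-buffer set needs
CONFIG_SPI_SAM_RTIO_SQ_SIZE >= 3 to be copied at all; the default of 8
leaves headroom for unmatched tx/rx sets that split into extra entries.

    /* Hypothetical three-buffer transmit set -> three SQEs */
    struct spi_buf tx[3] = {
        { .buf = cmd,  .len = sizeof(cmd)  },
        { .buf = addr, .len = sizeof(addr) },
        { .buf = data, .len = sizeof(data) },
    };
    struct spi_buf_set tx_set = { .buffers = tx, .count = 3 };

    /* A blocking spi_transceive() with tx_set consumes three of the
     * CONFIG_SPI_SAM_RTIO_SQ_SIZE entries via spi_rtio_copy().
     */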

drivers/spi/spi_sam.c

@@ -14,11 +14,16 @@ LOG_MODULE_REGISTER(spi_sam);
#include "spi_context.h"
#include <errno.h>
#include <zephyr/spinlock.h>
#include <zephyr/device.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control/atmel_sam_pmc.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/rtio/rtio_executor_simple.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <soc.h>
#define SAM_SPI_CHIP_SELECT_COUNT 4
@@ -47,6 +52,14 @@ struct spi_sam_data {
struct spi_context ctx;
struct k_spinlock lock;
#ifdef CONFIG_SPI_RTIO
struct rtio *r; /* context for thread calls */
struct rtio_iodev iodev;
struct rtio_iodev_sqe *iodev_sqe;
struct rtio_sqe *sqe;
struct spi_dt_spec dt_spec;
#endif
#ifdef CONFIG_SPI_SAM_DMA
struct k_sem dma_sem;
#endif /* CONFIG_SPI_SAM_DMA */
@@ -150,39 +163,6 @@ static int spi_sam_configure(const struct device *dev,
return 0;
}
static bool spi_sam_transfer_ongoing(struct spi_sam_data *data)
{
return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}
static void spi_sam_shift_master(Spi *regs, struct spi_sam_data *data)
{
uint8_t tx;
uint8_t rx;
if (spi_context_tx_buf_on(&data->ctx)) {
tx = *(uint8_t *)(data->ctx.tx_buf);
} else {
tx = 0U;
}
while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
}
regs->SPI_TDR = SPI_TDR_TD(tx);
spi_context_update_tx(&data->ctx, 1, 1);
while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
}
rx = (uint8_t)regs->SPI_RDR;
if (spi_context_rx_buf_on(&data->ctx)) {
*data->ctx.rx_buf = rx;
}
spi_context_update_rx(&data->ctx, 1, 1);
}
/* Finish any ongoing writes and drop any remaining read data */
static void spi_sam_finish(Spi *regs)
{
@@ -195,12 +175,10 @@ static void spi_sam_finish(Spi *regs)
}
/* Fast path that transmits a buf */
static void spi_sam_fast_tx(const struct device *dev, Spi *regs, const struct spi_buf *tx_buf)
static void spi_sam_fast_tx(Spi *regs, const uint8_t *tx_buf, const uint32_t tx_buf_len)
{
k_spinlock_key_t key = spi_spin_lock(dev);
const uint8_t *p = tx_buf->buf;
const uint8_t *pend = (uint8_t *)tx_buf->buf + tx_buf->len;
const uint8_t *p = tx_buf;
const uint8_t *pend = (uint8_t *)tx_buf + tx_buf_len;
uint8_t ch;
while (p != pend) {
@@ -211,26 +189,18 @@ static void spi_sam_fast_tx(const struct device *dev, Spi *regs, const struct sp
regs->SPI_TDR = SPI_TDR_TD(ch);
}
spi_sam_finish(regs);
spi_spin_unlock(dev, key);
}
/* Fast path that reads into a buf */
static void spi_sam_fast_rx(const struct device *dev, Spi *regs, const struct spi_buf *rx_buf)
static void spi_sam_fast_rx(Spi *regs, uint8_t *rx_buf, const uint32_t rx_buf_len)
{
k_spinlock_key_t key = spi_spin_lock(dev);
uint8_t *rx = rx_buf->buf;
int len = rx_buf->len;
uint8_t *rx = rx_buf;
int len = rx_buf_len;
if (len <= 0) {
return;
}
/* See the comment in spi_sam_fast_txrx re: interleaving. */
/* Write the first byte */
regs->SPI_TDR = SPI_TDR_TD(0);
len--;
@@ -239,15 +209,16 @@ static void spi_sam_fast_rx(const struct device *dev, Spi *regs, const struct sp
while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
}
/* Load byte N+1 into the transmit register */
regs->SPI_TDR = SPI_TDR_TD(0);
len--;
/* Read byte N+0 from the receive register */
while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
}
*rx++ = (uint8_t)regs->SPI_RDR;
*rx = (uint8_t)regs->SPI_RDR;
rx++;
/* Load byte N+1 into the transmit register */
regs->SPI_TDR = SPI_TDR_TD(0);
len--;
}
/* Read the final incoming byte */
@@ -255,164 +226,17 @@ static void spi_sam_fast_rx(const struct device *dev, Spi *regs, const struct sp
}
*rx = (uint8_t)regs->SPI_RDR;
spi_sam_finish(regs);
spi_spin_unlock(dev, key);
}
#ifdef CONFIG_SPI_SAM_DMA
static uint8_t tx_dummy;
static uint8_t rx_dummy;
static void dma_callback(const struct device *dma_dev, void *user_data,
uint32_t channel, int status)
{
ARG_UNUSED(dma_dev);
ARG_UNUSED(channel);
ARG_UNUSED(status);
struct k_sem *sem = user_data;
k_sem_give(sem);
}
/* DMA transceive path */
static int spi_sam_dma_txrx(const struct device *dev,
Spi *regs,
const struct spi_buf *tx_buf,
const struct spi_buf *rx_buf)
{
const struct spi_sam_config *drv_cfg = dev->config;
struct spi_sam_data *drv_data = dev->data;
int res = 0;
__ASSERT_NO_MSG(rx_buf != NULL || tx_buf != NULL);
/* If rx and tx are non-null, they must be the same length */
if (rx_buf != NULL && tx_buf != NULL) {
__ASSERT(tx_buf->len == rx_buf->len, "TX RX buffer lengths must match");
}
uint32_t len = tx_buf != NULL ? tx_buf->len : rx_buf->len;
struct dma_config rx_dma_cfg = {
.source_data_size = 1,
.dest_data_size = 1,
.block_count = 1,
.dma_slot = drv_cfg->dma_rx_perid,
.channel_direction = PERIPHERAL_TO_MEMORY,
.source_burst_length = 1,
.dest_burst_length = 1,
.complete_callback_en = true,
.error_callback_en = true,
.dma_callback = dma_callback,
.user_data = &drv_data->dma_sem,
};
uint32_t dest_address, dest_addr_adjust;
if (rx_buf != NULL) {
dest_address = (uint32_t)rx_buf->buf;
dest_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
} else {
dest_address = (uint32_t)&rx_dummy;
dest_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
}
struct dma_block_config rx_block_cfg = {
.dest_addr_adj = dest_addr_adjust,
.block_size = len,
.source_address = (uint32_t)&regs->SPI_RDR,
.dest_address = dest_address
};
rx_dma_cfg.head_block = &rx_block_cfg;
struct dma_config tx_dma_cfg = {
.source_data_size = 1,
.dest_data_size = 1,
.block_count = 1,
.dma_slot = drv_cfg->dma_tx_perid,
.channel_direction = MEMORY_TO_PERIPHERAL,
.source_burst_length = 1,
.dest_burst_length = 1,
.complete_callback_en = true,
.error_callback_en = true,
.dma_callback = dma_callback,
.user_data = &drv_data->dma_sem,
};
uint32_t source_address, source_addr_adjust;
if (tx_buf != NULL) {
source_address = (uint32_t)tx_buf->buf;
source_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
} else {
source_address = (uint32_t)&tx_dummy;
source_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
}
struct dma_block_config tx_block_cfg = {
.source_addr_adj = source_addr_adjust,
.block_size = len,
.source_address = source_address,
.dest_address = (uint32_t)&regs->SPI_TDR
};
tx_dma_cfg.head_block = &tx_block_cfg;
res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_rx_channel, &rx_dma_cfg);
if (res != 0) {
LOG_ERR("failed to configure SPI DMA RX");
goto out;
}
res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_tx_channel, &tx_dma_cfg);
if (res != 0) {
LOG_ERR("failed to configure SPI DMA TX");
goto out;
}
/* Clocking begins on tx, so start rx first */
res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
if (res != 0) {
LOG_ERR("failed to start SPI DMA RX");
goto out;
}
res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_tx_channel);
if (res != 0) {
LOG_ERR("failed to start SPI DMA TX");
dma_stop(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
}
for (int i = 0; i < 2; i++) {
k_sem_take(&drv_data->dma_sem, K_FOREVER);
}
spi_sam_finish(regs);
out:
return res;
}
#endif /* CONFIG_SPI_SAM_DMA */
/* Fast path that writes and reads bufs of the same length */
static void spi_sam_fast_txrx(const struct device *dev, Spi *regs, const struct spi_buf *tx_buf,
const struct spi_buf *rx_buf)
static void spi_sam_fast_txrx(Spi *regs,
const uint8_t *tx_buf,
const uint8_t *rx_buf,
const uint32_t len)
{
k_spinlock_key_t key = spi_spin_lock(dev);
const uint8_t *tx = tx_buf->buf;
const uint8_t *txend = (uint8_t *)tx_buf->buf + tx_buf->len;
uint8_t *rx = rx_buf->buf;
size_t len = rx_buf->len;
const uint8_t *tx = tx_buf;
const uint8_t *txend = tx_buf + len;
uint8_t *rx = (uint8_t *)rx_buf;
if (len == 0) {
return;
@@ -456,63 +280,243 @@ static void spi_sam_fast_txrx(const struct device *dev, Spi *regs, const struct
*rx = (uint8_t)regs->SPI_RDR;
}
#ifdef CONFIG_SPI_SAM_DMA
static __aligned(4) uint32_t tx_dummy;
static __aligned(4) uint32_t rx_dummy;
#ifdef CONFIG_SPI_RTIO
static void spi_sam_iodev_complete(const struct device *dev, int status);
#endif
static void dma_callback(const struct device *dma_dev, void *user_data,
uint32_t channel, int status)
{
ARG_UNUSED(dma_dev);
ARG_UNUSED(channel);
ARG_UNUSED(status);
const struct device *dev = user_data;
struct spi_sam_data *drv_data = dev->data;
#ifdef CONFIG_SPI_RTIO
if (drv_data->iodev_sqe != NULL) {
spi_sam_iodev_complete(dev, status);
return;
}
#endif
k_sem_give(&drv_data->dma_sem);
}
/* DMA transceive path */
static int spi_sam_dma_txrx(const struct device *dev,
Spi *regs,
const uint8_t *tx_buf,
const uint8_t *rx_buf,
const uint32_t len)
{
const struct spi_sam_config *drv_cfg = dev->config;
struct spi_sam_data *drv_data = dev->data;
#ifdef CONFIG_SPI_RTIO
bool blocking = drv_data->iodev_sqe == NULL;
#else
bool blocking = true;
#endif
int res = 0;
__ASSERT_NO_MSG(rx_buf != NULL || tx_buf != NULL);
struct dma_config rx_dma_cfg = {
.source_data_size = 1,
.dest_data_size = 1,
.block_count = 1,
.dma_slot = drv_cfg->dma_rx_perid,
.channel_direction = PERIPHERAL_TO_MEMORY,
.source_burst_length = 1,
.dest_burst_length = 1,
.complete_callback_en = true,
.error_callback_en = true,
.dma_callback = NULL,
.user_data = (void *)dev,
};
uint32_t dest_address, dest_addr_adjust;
if (rx_buf != NULL) {
dest_address = (uint32_t)rx_buf;
dest_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
} else {
dest_address = (uint32_t)&rx_dummy;
dest_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
}
struct dma_block_config rx_block_cfg = {
.dest_addr_adj = dest_addr_adjust,
.block_size = len,
.source_address = (uint32_t)&regs->SPI_RDR,
.dest_address = dest_address
};
rx_dma_cfg.head_block = &rx_block_cfg;
struct dma_config tx_dma_cfg = {
.source_data_size = 1,
.dest_data_size = 1,
.block_count = 1,
.dma_slot = drv_cfg->dma_tx_perid,
.channel_direction = MEMORY_TO_PERIPHERAL,
.source_burst_length = 1,
.dest_burst_length = 1,
.complete_callback_en = true,
.error_callback_en = true,
.dma_callback = dma_callback,
.user_data = (void *)dev,
};
uint32_t source_address, source_addr_adjust;
if (tx_buf != NULL) {
source_address = (uint32_t)tx_buf;
source_addr_adjust = DMA_ADDR_ADJ_INCREMENT;
} else {
LOG_INF("using tx dummy");
source_address = (uint32_t)&tx_dummy;
source_addr_adjust = DMA_ADDR_ADJ_NO_CHANGE;
}
struct dma_block_config tx_block_cfg = {
.source_addr_adj = source_addr_adjust,
.block_size = len,
.source_address = source_address,
.dest_address = (uint32_t)&regs->SPI_TDR
};
tx_dma_cfg.head_block = &tx_block_cfg;
res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_rx_channel, &rx_dma_cfg);
if (res != 0) {
LOG_ERR("failed to configure SPI DMA RX");
goto out;
}
res = dma_config(drv_cfg->dma_dev, drv_cfg->dma_tx_channel, &tx_dma_cfg);
if (res != 0) {
LOG_ERR("failed to configure SPI DMA TX");
goto out;
}
/* Clocking begins on tx, so start rx first */
res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
if (res != 0) {
LOG_ERR("failed to start SPI DMA RX");
goto out;
}
res = dma_start(drv_cfg->dma_dev, drv_cfg->dma_tx_channel);
if (res != 0) {
LOG_ERR("failed to start SPI DMA TX");
dma_stop(drv_cfg->dma_dev, drv_cfg->dma_rx_channel);
}
/* Move up a level or wrap in branch when blocking */
if (blocking) {
k_sem_take(&drv_data->dma_sem, K_FOREVER);
spi_sam_finish(regs);
} else {
res = -EWOULDBLOCK;
}
out:
return res;
}
#endif /* CONFIG_SPI_SAM_DMA */
static inline int spi_sam_rx(const struct device *dev,
Spi *regs,
uint8_t *rx_buf,
uint32_t rx_buf_len)
{
k_spinlock_key_t key;
#ifdef CONFIG_SPI_SAM_DMA
const struct spi_sam_config *cfg = dev->config;
if (rx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
key = spi_spin_lock(dev);
spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
} else {
return spi_sam_dma_txrx(dev, regs, NULL, rx_buf, rx_buf_len);
}
#else
key = spi_spin_lock(dev);
spi_sam_fast_rx(regs, rx_buf, rx_buf_len);
#endif
spi_sam_finish(regs);
spi_spin_unlock(dev, key);
return 0;
}
static inline void spi_sam_rx(const struct device *dev,
static inline int spi_sam_tx(const struct device *dev,
Spi *regs,
const struct spi_buf *rx)
const uint8_t *tx_buf,
uint32_t tx_buf_len)
{
k_spinlock_key_t key;
#ifdef CONFIG_SPI_SAM_DMA
const struct spi_sam_config *cfg = dev->config;
if (rx->len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
spi_sam_fast_rx(dev, regs, rx);
if (tx_buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
key = spi_spin_lock(dev);
spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
} else {
spi_sam_dma_txrx(dev, regs, NULL, rx);
return spi_sam_dma_txrx(dev, regs, tx_buf, NULL, tx_buf_len);
}
#else
spi_sam_fast_rx(dev, regs, rx);
key = spi_spin_lock(dev);
spi_sam_fast_tx(regs, tx_buf, tx_buf_len);
#endif
spi_sam_finish(regs);
spi_spin_unlock(dev, key);
return 0;
}
static inline void spi_sam_tx(const struct device *dev,
static inline int spi_sam_txrx(const struct device *dev,
Spi *regs,
const struct spi_buf *tx)
const uint8_t *tx_buf,
const uint8_t *rx_buf,
uint32_t buf_len)
{
k_spinlock_key_t key;
#ifdef CONFIG_SPI_SAM_DMA
const struct spi_sam_config *cfg = dev->config;
if (tx->len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
spi_sam_fast_tx(dev, regs, tx);
if (buf_len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
key = spi_spin_lock(dev);
spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
} else {
spi_sam_dma_txrx(dev, regs, tx, NULL);
return spi_sam_dma_txrx(dev, regs, tx_buf, rx_buf, buf_len);
}
#else
spi_sam_fast_tx(dev, regs, tx);
key = spi_spin_lock(dev);
spi_sam_fast_txrx(regs, tx_buf, rx_buf, buf_len);
#endif
spi_sam_finish(regs);
spi_spin_unlock(dev, key);
return 0;
}
static inline void spi_sam_txrx(const struct device *dev,
Spi *regs,
const struct spi_buf *tx,
const struct spi_buf *rx)
{
#ifdef CONFIG_SPI_SAM_DMA
const struct spi_sam_config *cfg = dev->config;
if (tx->len < SAM_SPI_DMA_THRESHOLD || cfg->dma_dev == NULL) {
spi_sam_fast_txrx(dev, regs, tx, rx);
} else {
spi_sam_dma_txrx(dev, regs, tx, rx);
}
#else
spi_sam_fast_txrx(dev, regs, tx, rx);
#endif
}
#ifndef CONFIG_SPI_RTIO
/* Fast path where every overlapping tx and rx buffer is the same length */
static void spi_sam_fast_transceive(const struct device *dev,
@@ -539,11 +543,13 @@ static void spi_sam_fast_transceive(const struct device *dev,
while (tx_count != 0 && rx_count != 0) {
if (tx->buf == NULL) {
spi_sam_rx(dev, regs, rx);
spi_sam_rx(dev, regs, rx->buf, rx->len);
} else if (rx->buf == NULL) {
spi_sam_tx(dev, regs, tx);
spi_sam_tx(dev, regs, tx->buf, tx->len);
} else if (rx->len == tx->len) {
spi_sam_txrx(dev, regs, tx->buf, rx->buf, rx->len);
} else {
spi_sam_txrx(dev, regs, tx, rx);
__ASSERT(false, "Invalid fast transceive configuration");
}
tx++;
@@ -553,14 +559,49 @@
}
for (; tx_count != 0; tx_count--) {
spi_sam_tx(dev, regs, tx++);
spi_sam_tx(dev, regs, tx->buf, tx->len);
tx++;
}
for (; rx_count != 0; rx_count--) {
spi_sam_rx(dev, regs, rx++);
spi_sam_rx(dev, regs, rx->buf, rx->len);
rx++;
}
}
static bool spi_sam_transfer_ongoing(struct spi_sam_data *data)
{
return spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx);
}
static void spi_sam_shift_master(Spi *regs, struct spi_sam_data *data)
{
uint8_t tx;
uint8_t rx;
if (spi_context_tx_buf_on(&data->ctx)) {
tx = *(uint8_t *)(data->ctx.tx_buf);
} else {
tx = 0U;
}
while ((regs->SPI_SR & SPI_SR_TDRE) == 0) {
}
regs->SPI_TDR = SPI_TDR_TD(tx);
spi_context_update_tx(&data->ctx, 1, 1);
while ((regs->SPI_SR & SPI_SR_RDRF) == 0) {
}
rx = (uint8_t)regs->SPI_RDR;
if (spi_context_rx_buf_on(&data->ctx)) {
*data->ctx.rx_buf = rx;
}
spi_context_update_rx(&data->ctx, 1, 1);
}
/* Returns true if the request is suitable for the fast
* path. Specifically, the bufs are a sequence of:
*
@@ -604,18 +645,138 @@ static bool spi_sam_is_regular(const struct spi_buf_set *tx_bufs,
return true;
}
#else
static void spi_sam_iodev_complete(const struct device *dev, int status);
static void spi_sam_iodev_start(const struct device *dev)
{
const struct spi_sam_config *cfg = dev->config;
struct spi_sam_data *data = dev->data;
struct rtio_sqe *sqe = data->sqe;
int ret = 0;
switch (sqe->op) {
case RTIO_OP_RX:
ret = spi_sam_rx(dev, cfg->regs, sqe->buf, sqe->buf_len);
break;
case RTIO_OP_TX:
ret = spi_sam_tx(dev, cfg->regs, sqe->buf, sqe->buf_len);
break;
case RTIO_OP_TINY_TX:
ret = spi_sam_tx(dev, cfg->regs, sqe->tiny_buf, sqe->tiny_buf_len);
break;
case RTIO_OP_TXRX:
ret = spi_sam_txrx(dev, cfg->regs, sqe->tx_buf, sqe->rx_buf, sqe->txrx_buf_len);
break;
default:
LOG_ERR("Invalid op code %d for submission %p", sqe->op, (void *)sqe);
rtio_iodev_sqe_err(data->iodev_sqe, -EINVAL);
data->iodev_sqe = NULL;
data->sqe = NULL;
/* The sqe was already completed with an error; don't fall
 * through to the completion path below.
 */
return;
}
if (ret == 0) {
spi_sam_iodev_complete(dev, 0);
}
}
static void spi_sam_iodev_next(const struct device *dev, bool completion)
{
struct spi_sam_data *data = dev->data;
k_spinlock_key_t key = spi_spin_lock(dev);
if (!completion && data->iodev_sqe != NULL) {
spi_spin_unlock(dev, key);
return;
}
struct rtio_mpsc_node *next = rtio_mpsc_pop(&data->iodev.iodev_sq);
if (next != NULL) {
struct rtio_iodev_sqe *next_sqe = CONTAINER_OF(next, struct rtio_iodev_sqe, q);
data->iodev_sqe = next_sqe;
data->sqe = (struct rtio_sqe *)next_sqe->sqe;
} else {
data->iodev_sqe = NULL;
data->sqe = NULL;
}
spi_spin_unlock(dev, key);
if (data->iodev_sqe != NULL) {
struct spi_dt_spec *spi_dt_spec = data->sqe->iodev->data;
struct spi_config *spi_cfg = &spi_dt_spec->config;
spi_sam_configure(dev, spi_cfg);
spi_context_cs_control(&data->ctx, true);
spi_sam_iodev_start(dev);
}
}
static void spi_sam_iodev_complete(const struct device *dev, int status)
{
struct spi_sam_data *data = dev->data;
if (data->sqe->flags & RTIO_SQE_TRANSACTION) {
data->sqe = rtio_spsc_next(data->iodev_sqe->r->sq, data->sqe);
spi_sam_iodev_start(dev);
} else {
struct rtio_iodev_sqe *iodev_sqe = data->iodev_sqe;
spi_context_cs_control(&data->ctx, false);
spi_sam_iodev_next(dev, true);
rtio_iodev_sqe_ok(iodev_sqe, status);
}
}
static void spi_sam_iodev_submit(const struct device *dev,
struct rtio_iodev_sqe *iodev_sqe)
{
struct spi_sam_data *data = dev->data;
rtio_mpsc_push(&data->iodev.iodev_sq, &iodev_sqe->q);
spi_sam_iodev_next(dev, false);
}
#endif
static int spi_sam_transceive(const struct device *dev,
const struct spi_config *config,
const struct spi_buf_set *tx_bufs,
const struct spi_buf_set *rx_bufs)
{
const struct spi_sam_config *cfg = dev->config;
struct spi_sam_data *data = dev->data;
Spi *regs = cfg->regs;
int err;
int err = 0;
spi_context_lock(&data->ctx, false, NULL, NULL, config);
#if CONFIG_SPI_RTIO
struct rtio_sqe *sqe;
struct rtio_cqe *cqe;
struct spi_dt_spec *dt_spec = &data->dt_spec;
dt_spec->config = *config;
sqe = spi_rtio_copy(data->r, &data->iodev, tx_bufs, rx_bufs);
if (sqe == NULL) {
err = -ENOMEM;
goto done;
}
/* Submit request and wait */
rtio_submit(data->r, 1);
cqe = rtio_cqe_consume(data->r);
err = cqe->result;
rtio_cqe_release(data->r);
#else
const struct spi_sam_config *cfg = dev->config;
err = spi_sam_configure(dev, config);
if (err != 0) {
goto done;
@@ -623,23 +784,18 @@ static int spi_sam_transceive(const struct device *dev,
spi_context_cs_control(&data->ctx, true);
/* This driver special cases the common send only, receive
* only, and transmit then receive operations. This special
* casing is 4x faster than the spi_context() routines
* and allows the transmit and receive to be interleaved.
*/
if (spi_sam_is_regular(tx_bufs, rx_bufs)) {
spi_sam_fast_transceive(dev, config, tx_bufs, rx_bufs);
} else {
spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
do {
spi_sam_shift_master(regs, data);
spi_sam_shift_master(cfg->regs, data);
} while (spi_sam_transfer_ongoing(data));
}
spi_context_cs_control(&data->ctx, false);
#endif
done:
spi_context_release(&data->ctx, err);
return err;
@@ -700,6 +856,13 @@ static int spi_sam_init(const struct device *dev)
k_sem_init(&data->dma_sem, 0, K_SEM_MAX_LIMIT);
#endif
#ifdef CONFIG_SPI_RTIO
data->dt_spec.bus = dev;
data->iodev.api = &spi_iodev_api;
data->iodev.data = &data->dt_spec;
rtio_mpsc_init(&data->iodev.iodev_sq);
#endif
spi_context_unlock_unconditionally(&data->ctx);
/* The device will be configured and enabled when transceive
@@ -713,6 +876,9 @@ static const struct spi_driver_api spi_sam_driver_api = {
.transceive = spi_sam_transceive_sync,
#ifdef CONFIG_SPI_ASYNC
.transceive_async = spi_sam_transceive_async,
#endif
#ifdef CONFIG_SPI_RTIO
.iodev_submit = spi_sam_iodev_submit,
#endif
.release = spi_sam_release,
};
@@ -739,13 +905,20 @@ static const struct spi_driver_api spi_sam_driver_api = {
COND_CODE_1(SPI_SAM_USE_DMA(n), (SPI_DMA_INIT(n)), ()) \
}
#define SPI_SAM_RTIO_DEFINE(n) \
RTIO_EXECUTOR_SIMPLE_DEFINE(spi_sam_exec_##n); \
RTIO_DEFINE(spi_sam_rtio_##n, (struct rtio_executor *)&spi_sam_exec_##n, \
CONFIG_SPI_SAM_RTIO_SQ_SIZE, 1)
#define SPI_SAM_DEVICE_INIT(n) \
PINCTRL_DT_INST_DEFINE(n); \
SPI_SAM_DEFINE_CONFIG(n); \
COND_CODE_1(CONFIG_SPI_RTIO, (SPI_SAM_RTIO_DEFINE(n)), ()); \
static struct spi_sam_data spi_sam_dev_data_##n = { \
SPI_CONTEXT_INIT_LOCK(spi_sam_dev_data_##n, ctx), \
SPI_CONTEXT_INIT_SYNC(spi_sam_dev_data_##n, ctx), \
SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
IF_ENABLED(CONFIG_SPI_RTIO, (.r = &spi_sam_rtio_##n)) \
}; \
DEVICE_DT_INST_DEFINE(n, &spi_sam_init, NULL, \
&spi_sam_dev_data_##n, \