drivers: spi: spi_mcux_lpspi: Update the async API

Updated the async API so that the DMA code path
is taken when an asynchronous transfer is
requested. Added a common DMA function that sets
up both the TX and RX DMA channels.

Signed-off-by: Emilio Benavente <emilio.benavente@nxp.com>
Emilio Benavente 2023-09-19 07:19:50 -05:00 committed by Johan Hedberg
parent dde691bdd4
commit 17032a093d

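Below is a minimal caller-side sketch (not part of this commit) of how the new path is exercised once CONFIG_SPI_ASYNC and CONFIG_SPI_MCUX_LPSPI_DMA are enabled: spi_transceive_cb() hands the driver a completion callback, transceive_dma() arms the DMA channels and returns immediately, and the callback fires from the DMA completion path. The helper name, buffers, and semaphore handshake are illustrative assumptions, not driver code.

#include <zephyr/kernel.h>
#include <zephyr/drivers/spi.h>

static K_SEM_DEFINE(xfer_done_sem, 0, 1);

/* Invoked by the driver once the final DMA block completes
 * (spi_context_complete() calls it with the transfer status).
 */
static void xfer_done(const struct device *dev, int result, void *userdata)
{
	k_sem_give(userdata);
}

int sample_async_xfer(const struct device *lpspi_dev,
		      const struct spi_config *cfg)
{
	static uint8_t txd[4] = {0xde, 0xad, 0xbe, 0xef};
	static uint8_t rxd[4];
	const struct spi_buf txb = { .buf = txd, .len = sizeof(txd) };
	const struct spi_buf rxb = { .buf = rxd, .len = sizeof(rxd) };
	const struct spi_buf_set tx = { .buffers = &txb, .count = 1 };
	const struct spi_buf_set rx = { .buffers = &rxb, .count = 1 };

	/* Queues the transfer and returns; DMA runs in the background */
	int ret = spi_transceive_cb(lpspi_dev, cfg, &tx, &rx,
				    xfer_done, &xfer_done_sem);

	if (ret == 0) {
		/* Do other work here, then wait for the callback */
		k_sem_take(&xfer_done_sem, K_FOREVER);
	}
	return ret;
}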

@@ -248,6 +248,8 @@ static int spi_mcux_configure(const struct device *dev,
 }
 
 #ifdef CONFIG_SPI_MCUX_LPSPI_DMA
+static int spi_mcux_dma_rxtx_load(const struct device *dev,
+				  size_t *dma_size);
 
 /* This function is executed in the interrupt context */
 static void spi_mcux_dma_callback(const struct device *dev, void *arg,
@@ -276,6 +278,26 @@ static void spi_mcux_dma_callback(const struct device *dev, void *arg,
 			data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG;
 		}
 	}
+#if CONFIG_SPI_ASYNC
+	if (data->ctx.asynchronous &&
+	    ((data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) ==
+	     SPI_MCUX_LPSPI_DMA_DONE_FLAG)) {
+		/* Load dma blocks of equal length */
+		size_t dma_size = MIN(data->ctx.tx_len, data->ctx.rx_len);
+
+		if (dma_size == 0) {
+			dma_size = MAX(data->ctx.tx_len, data->ctx.rx_len);
+		}
+
+		spi_context_update_tx(&data->ctx, 1, dma_size);
+		spi_context_update_rx(&data->ctx, 1, dma_size);
+
+		if (data->ctx.tx_len == 0 && data->ctx.rx_len == 0) {
+			spi_context_complete(&data->ctx, spi_dev, 0);
+		}
+		return;
+	}
+#endif
 	spi_context_complete(&data->ctx, spi_dev, 0);
 }
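The `(status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) == SPI_MCUX_LPSPI_DMA_DONE_FLAG` test matters because the TX and RX channels complete independently, and each invocation of this callback sets only one bit; the async bookkeeping must run only once both halves have finished. The driver's existing flag defines follow this pattern (the exact values here are shown for illustration):

/* One bit per event; DONE is the conjunction of both channels */
#define SPI_MCUX_LPSPI_DMA_ERROR_FLAG   0x01
#define SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG 0x02
#define SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG 0x04
#define SPI_MCUX_LPSPI_DMA_DONE_FLAG \
	(SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG | SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG)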
@@ -386,6 +408,45 @@ static int wait_dma_rx_tx_done(const struct device *dev)
 	}
 }
 
+static inline int spi_mcux_dma_rxtx_load(const struct device *dev,
+					 size_t *dma_size)
+{
+	struct spi_mcux_data *lpspi_data = dev->data;
+	int ret = 0;
+
+	/* Clear status flags */
+	lpspi_data->status_flags = 0U;
+	/* Load dma blocks of equal length */
+	*dma_size = MIN(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len);
+	if (*dma_size == 0) {
+		*dma_size = MAX(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len);
+	}
+
+	ret = spi_mcux_dma_tx_load(dev, lpspi_data->ctx.tx_buf,
+				   *dma_size);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = spi_mcux_dma_rx_load(dev, lpspi_data->ctx.rx_buf,
+				   *dma_size);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* Start DMA */
+	ret = dma_start(lpspi_data->dma_tx.dma_dev,
+			lpspi_data->dma_tx.channel);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = dma_start(lpspi_data->dma_rx.dma_dev,
+			lpspi_data->dma_rx.channel);
+	return ret;
+}
+
 static int transceive_dma(const struct device *dev,
 			  const struct spi_config *spi_cfg,
 			  const struct spi_buf_set *tx_bufs,
@@ -400,49 +461,32 @@ static int transceive_dma(const struct device *dev,
 	int ret;
 	size_t dma_size;
 
+	if (!asynchronous) {
 		spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg);
+	}
 
 	ret = spi_mcux_configure(dev, spi_cfg);
 	if (ret) {
-		goto out;
+		if (!asynchronous) {
+			spi_context_release(&data->ctx, ret);
+		}
+		return ret;
 	}
 
-	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
-
-	spi_context_cs_control(&data->ctx, true);
-
 	/* DMA is fast enough watermarks are not required */
 	LPSPI_SetFifoWatermarks(base, 0U, 0U);
 
+	if (!asynchronous) {
+		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
+		spi_context_cs_control(&data->ctx, true);
+
 		/* Send each spi buf via DMA, updating context as DMA completes */
 		while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) {
-			/* Clear status flags */
-			data->status_flags = 0U;
-			/* Load dma blocks of equal length */
-			dma_size = MIN(data->ctx.tx_len, data->ctx.rx_len);
-			if (dma_size == 0) {
-				dma_size = MAX(data->ctx.tx_len, data->ctx.rx_len);
-			}
-			ret = spi_mcux_dma_tx_load(dev, data->ctx.tx_buf, dma_size);
+			/* Load dma block */
+			ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
 			if (ret != 0) {
 				goto out;
 			}
-
-			ret = spi_mcux_dma_rx_load(dev, data->ctx.rx_buf, dma_size);
-			if (ret != 0) {
-				goto out;
-			}
-
-			/* Start DMA */
-			ret = dma_start(data->dma_tx.dma_dev, data->dma_tx.channel);
-			if (ret != 0) {
-				goto out;
-			}
-			ret = dma_start(data->dma_rx.dma_dev, data->dma_rx.channel);
-			if (ret != 0) {
-				goto out;
-			}
 
 			/* Enable DMA Requests */
 			LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
@@ -451,7 +495,6 @@ static int transceive_dma(const struct device *dev,
 			if (ret != 0) {
 				goto out;
 			}
-
 			while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) {
 				/* wait until module is idle */
 			}
@@ -463,15 +506,29 @@ static int transceive_dma(const struct device *dev,
 			spi_context_update_tx(&data->ctx, 1, dma_size);
 			spi_context_update_rx(&data->ctx, 1, dma_size);
 		}
-
 		spi_context_cs_control(&data->ctx, false);
 
 out:
 		spi_context_release(&data->ctx, ret);
-
+	}
+#if CONFIG_SPI_ASYNC
+	else {
+		data->ctx.asynchronous = asynchronous;
+		data->ctx.callback = cb;
+		data->ctx.callback_data = userdata;
+
+		ret = spi_mcux_dma_rxtx_load(dev, &dma_size);
+		if (ret != 0) {
+			goto out;
+		}
+
+		/* Enable DMA Requests */
+		LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable);
+	}
+#endif
 
 	return ret;
 }
 #endif
 
 static int transceive(const struct device *dev,
@@ -530,7 +587,17 @@ static int spi_mcux_transceive_async(const struct device *dev,
 					     spi_callback_t cb,
 					     void *userdata)
 {
+#ifdef CONFIG_SPI_MCUX_LPSPI_DMA
+	struct spi_mcux_data *data = dev->data;
+
+	if (data->dma_rx.dma_dev && data->dma_tx.dma_dev) {
+		spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1);
+	}
+
+	return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
+#else
 	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata);
+#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */
 }
 
 #endif /* CONFIG_SPI_ASYNC */
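As a worked example of the MIN/MAX block sizing used both in the callback and in spi_mcux_dma_rxtx_load(): a transfer with a 4-byte TX buffer and an 8-byte RX buffer splits into one 4-byte block moved on both channels, then one 4-byte RX-only block, because dma_size falls back to MAX once the shorter side is exhausted. A standalone sketch of that arithmetic, assuming nothing from the driver:

#include <stdio.h>
#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	size_t tx_len = 4, rx_len = 8; /* unequal buffer lengths */
	int pass = 0;

	while (tx_len > 0 || rx_len > 0) {
		/* Same sizing rule as the driver: equal-length blocks,
		 * falling back to the longer side once one runs out.
		 */
		size_t dma_size = MIN(tx_len, rx_len);

		if (dma_size == 0) {
			dma_size = MAX(tx_len, rx_len);
		}
		printf("pass %d: dma_size = %zu\n", ++pass, dma_size);
		tx_len -= MIN(dma_size, tx_len);
		rx_len -= MIN(dma_size, rx_len);
	}
	return 0; /* prints: pass 1: dma_size = 4, pass 2: dma_size = 4 */
}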