timeouts: Port UART async API to the new timeout API

Port the API to the new paradigm so as to avoid having to select
LEGACY_TIMEOUT_API.

Signed-off-by: Carles Cufi <carles.cufi@nordicsemi.no>
commit 9b096f40b6
parent 9b04a99243
Author:    Carles Cufi <carles.cufi@nordicsemi.no>
Committer: Carles Cufí
Date:      2020-05-01 11:58:15 +02:00

5 changed files with 24 additions and 23 deletions
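The change is mechanical across all five files: async UART timeouts are now kept as plain s32_t millisecond values, converted to k_timeout_t with K_MSEC() only at the point where a kernel timer or delayed work item is armed, and SYS_FOREVER_MS (rather than K_FOREVER) is the "no timeout" sentinel. A minimal sketch of the pattern, with hypothetical names (not code from this tree):

#include <kernel.h>

struct sketch_uart_data {
	struct k_delayed_work timeout_work;
	s32_t timeout_ms; /* plain ms; SYS_FOREVER_MS means "never" */
};

static void sketch_arm_timeout(struct sketch_uart_data *data)
{
	/* Convert to k_timeout_t only when arming the work item. */
	if (data->timeout_ms != SYS_FOREVER_MS) {
		k_delayed_work_submit(&data->timeout_work,
				      K_MSEC(data->timeout_ms));
	}
}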

drivers/serial/Kconfig

@@ -34,7 +34,6 @@ config SERIAL_SUPPORT_INTERRUPT
 config UART_ASYNC_API
 	bool "Enable new asynchronous UART API [EXPERIMENTAL]"
 	depends on SERIAL_SUPPORT_ASYNC
-	select LEGACY_TIMEOUT_API
 	help
 	  This option enables new asynchronous UART API.
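Dropping the LEGACY_TIMEOUT_API selection is what forces the driver changes below: without that option, k_timeout_t is an opaque structure rather than an integer, so integer sentinels plus explicit K_MSEC() conversions are needed instead. Roughly, as a hedged sketch (helper name hypothetical):

static bool sketch_has_timeout(s32_t timeout_ms)
{
	/* Under the new API, k_timeout_t is an opaque struct, so the
	 * old `timeout != K_FOREVER` integer comparison no longer
	 * compiles. Keeping the value as s32_t milliseconds and testing
	 * the SYS_FOREVER_MS sentinel sidesteps that.
	 */
	return timeout_ms != SYS_FOREVER_MS;
}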

drivers/serial/uart_nrfx_uart.c

@@ -443,7 +443,7 @@ static int uart_nrfx_tx_abort(struct device *dev)
 		return -EINVAL;
 	}
 #if HW_FLOW_CONTROL
-	if (uart0_cb.tx_timeout != K_FOREVER) {
+	if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
 		k_delayed_work_cancel(&uart0_cb.tx_timeout_work);
 	}
 #endif
@@ -514,7 +514,7 @@ static int uart_nrfx_rx_disable(struct device *dev)
 	}
 	uart0_cb.rx_enabled = 0;
-	if (uart0_cb.rx_timeout != K_FOREVER) {
+	if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
 		k_delayed_work_cancel(&uart0_cb.rx_timeout_work);
 	}
 	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);
@@ -584,16 +584,16 @@ static void rx_isr(struct device *dev)
 		uart0_cb.rx_buffer[uart0_cb.rx_counter] =
 			nrf_uart_rxd_get(uart0_addr);
 		uart0_cb.rx_counter++;
-		if (uart0_cb.rx_timeout == K_NO_WAIT) {
+		if (uart0_cb.rx_timeout == 0) {
 			rx_rdy_evt();
-		} else if (uart0_cb.rx_timeout != K_FOREVER) {
+		} else if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
 			k_delayed_work_submit(&uart0_cb.rx_timeout_work,
-					      uart0_cb.rx_timeout);
+					      K_MSEC(uart0_cb.rx_timeout));
 		}
 	}
 	if (uart0_cb.rx_buffer_length == uart0_cb.rx_counter) {
-		if (uart0_cb.rx_timeout != K_FOREVER) {
+		if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
 			k_delayed_work_cancel(&uart0_cb.rx_timeout_work);
 		}
 		rx_rdy_evt();
@@ -622,9 +622,9 @@ static void tx_isr(void)
 	if (uart0_cb.tx_counter < uart0_cb.tx_buffer_length &&
 	    !uart0_cb.tx_abort) {
 #if HW_FLOW_CONTROL
-		if (uart0_cb.tx_timeout != K_FOREVER) {
+		if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
 			k_delayed_work_submit(&uart0_cb.tx_timeout_work,
-					      uart0_cb.tx_timeout);
+					      K_MSEC(uart0_cb.tx_timeout));
 		}
 #endif
 		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
@@ -635,7 +635,7 @@ static void tx_isr(void)
 	} else {
 #if HW_FLOW_CONTROL
-		if (uart0_cb.tx_timeout != K_FOREVER) {
+		if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
 			k_delayed_work_cancel(&uart0_cb.tx_timeout_work);
 		}
 #endif
@@ -664,7 +664,7 @@ static void tx_isr(void)
 static void error_isr(struct device *dev)
 {
-	if (uart0_cb.rx_timeout != K_FOREVER) {
+	if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
 		k_delayed_work_cancel(&uart0_cb.rx_timeout_work);
 	}
 	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
@@ -741,7 +741,7 @@ static void tx_timeout(struct k_work *work)
 {
 	struct uart_event evt;
-	if (uart0_cb.tx_timeout != K_FOREVER) {
+	if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
 		k_delayed_work_cancel(&uart0_cb.tx_timeout_work);
 	}
 	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
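Note the three-way mapping in rx_isr() above: the old K_NO_WAIT case becomes a plain 0, K_FOREVER becomes SYS_FOREVER_MS, and any other value is a finite millisecond count. Restated as a hedged sketch (helper name hypothetical, event reporting elided):

static void sketch_on_rx_byte(s32_t rx_timeout_ms,
			      struct k_delayed_work *work)
{
	if (rx_timeout_ms == 0) {
		/* Old K_NO_WAIT: report received data immediately. */
	} else if (rx_timeout_ms != SYS_FOREVER_MS) {
		/* Finite timeout: (re)arm the inactivity work item. */
		k_delayed_work_submit(work, K_MSEC(rx_timeout_ms));
	}
	/* SYS_FOREVER_MS: no inactivity timeout is armed at all. */
}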

drivers/serial/uart_nrfx_uarte.c

@@ -496,8 +496,8 @@ static int uarte_nrfx_tx(struct device *dev, const u8_t *buf, size_t len,
 			     NRF_UARTE_INT_TXSTOPPED_MASK);
 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
 	if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS
-	    && timeout != K_FOREVER) {
-		k_timer_start(&data->async->tx_timeout_timer, timeout,
+	    && timeout != SYS_FOREVER_MS) {
+		k_timer_start(&data->async->tx_timeout_timer, K_MSEC(timeout),
 			      K_NO_WAIT);
 	}
 	return 0;
@@ -733,11 +733,11 @@ static void rxstarted_isr(struct device *dev)
 		.type = UART_RX_BUF_REQUEST,
 	};
 	user_callback(dev, &evt);
-	if (data->async->rx_timeout != K_FOREVER) {
+	if (data->async->rx_timeout != SYS_FOREVER_MS) {
 		data->async->rx_timeout_left = data->async->rx_timeout;
 		k_timer_start(&data->async->rx_timeout_timer,
-			      data->async->rx_timeout_slab,
-			      data->async->rx_timeout_slab);
+			      K_MSEC(data->async->rx_timeout_slab),
+			      K_MSEC(data->async->rx_timeout_slab));
 	}
 }
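The UARTE variant uses k_timer rather than delayed work; the same rule applies, with both the duration and period arguments now taking k_timeout_t. A hedged sketch of the two timer shapes seen above (identifiers illustrative):

#include <kernel.h>

static struct k_timer sketch_tx_timer;
static struct k_timer sketch_rx_timer;

static void sketch_start_timers(s32_t tx_timeout_ms, s32_t rx_slab_ms)
{
	/* One-shot TX timeout: duration K_MSEC(...), period K_NO_WAIT. */
	if (tx_timeout_ms != SYS_FOREVER_MS) {
		k_timer_start(&sketch_tx_timer,
			      K_MSEC(tx_timeout_ms), K_NO_WAIT);
	}

	/* Periodic RX "slab" timer: the same ms value is converted for
	 * both the initial duration and the repeat period.
	 */
	if (rx_slab_ms != SYS_FOREVER_MS) {
		k_timer_start(&sketch_rx_timer,
			      K_MSEC(rx_slab_ms), K_MSEC(rx_slab_ms));
	}
}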

drivers/serial/uart_sam0.c

@@ -354,7 +354,7 @@ static void uart_sam0_rx_timeout(struct k_work *work)
 	if (dev_data->rx_timeout_from_isr) {
 		dev_data->rx_timeout_from_isr = false;
 		k_delayed_work_submit(&dev_data->rx_timeout_work,
-				      dev_data->rx_timeout_chunk);
+				      K_MSEC(dev_data->rx_timeout_chunk));
 		irq_unlock(key);
 		return;
 	}
@@ -375,7 +375,8 @@ static void uart_sam0_rx_timeout(struct k_work *work)
 		u32_t remaining = MIN(dev_data->rx_timeout_time - elapsed,
 				      dev_data->rx_timeout_chunk);
-		k_delayed_work_submit(&dev_data->rx_timeout_work, remaining);
+		k_delayed_work_submit(&dev_data->rx_timeout_work,
+				      K_MSEC(remaining));
 	}
 	irq_unlock(key);
@@ -839,8 +840,9 @@ static int uart_sam0_tx(struct device *dev, const u8_t *buf, size_t len,
 		return retval;
 	}
-	if (timeout != K_FOREVER) {
-		k_delayed_work_submit(&dev_data->tx_timeout_work, timeout);
+	if (timeout != SYS_FOREVER_MS) {
+		k_delayed_work_submit(&dev_data->tx_timeout_work,
+				      K_MSEC(timeout));
 	}
 	return dma_start(dev_data->dma, cfg->tx_dma_channel);
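The sam0 driver splits one logical RX timeout into chunks so it can observe progress between expirations; only the computed remainder gets wrapped in K_MSEC() when the work is resubmitted. A sketch of that arithmetic, assuming all budgets are tracked in ms (names illustrative):

#include <kernel.h>
#include <sys/util.h> /* MIN() */

static void sketch_resubmit_chunk(struct k_delayed_work *work,
				  u32_t total_ms, u32_t elapsed_ms,
				  u32_t chunk_ms)
{
	/* Wait for whichever is shorter, the remaining budget or one
	 * chunk, converting to k_timeout_t only at submit time.
	 */
	u32_t remaining = MIN(total_ms - elapsed_ms, chunk_ms);

	k_delayed_work_submit(work, K_MSEC(remaining));
}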

include/drivers/uart.h

@@ -456,7 +456,7 @@ static inline int uart_callback_set(struct device *dev,
  * @param buf Pointer to transmit buffer.
  * @param len Length of transmit buffer.
  * @param timeout Timeout in milliseconds. Valid only if flow control is
- *		  enabled. SYS_FOREVER_MS disables timeout.
+ *		  enabled. @ref SYS_FOREVER_MS disables timeout.
  *
  * @retval -EBUSY There is already an ongoing transfer.
  * @retval 0 If successful, negative errno code otherwise.
@@ -504,7 +504,7 @@ static inline int z_impl_uart_tx_abort(struct device *dev)
  * @param dev UART device structure.
  * @param buf Pointer to receive buffer.
  * @param len Buffer length.
- * @param timeout Timeout in milliseconds. SYS_FOREVER_MS disables timeout.
+ * @param timeout Timeout in milliseconds. @ref SYS_FOREVER_MS disables timeout.
  *
  * @retval -EBUSY RX already in progress.
  * @retval 0 If successful, negative errno code otherwise.
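On the caller side, the timeout parameters are plain millisecond values with SYS_FOREVER_MS as the sentinel, matching the documentation above. A hedged usage sketch against this API (the "UART_0" binding name and buffer sizes are assumptions):

#include <zephyr.h>
#include <drivers/uart.h>

void sketch_async_uart_usage(void)
{
	struct device *dev = device_get_binding("UART_0");
	static u8_t rx_buf[64];
	static const u8_t tx_buf[] = "hello";

	if (dev == NULL) {
		return;
	}

	/* 100 ms RX inactivity timeout; SYS_FOREVER_MS would disable it. */
	uart_rx_enable(dev, rx_buf, sizeof(rx_buf), 100);

	/* TX timeout only applies when RTS/CTS flow control is enabled. */
	uart_tx(dev, tx_buf, sizeof(tx_buf) - 1, SYS_FOREVER_MS);
}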