drivers: serial: Use microseconds to represent timeout

Updated uart_rx_enable() and uart_tx() to take the timeout in
microseconds. Previously the argument was given in milliseconds.
However, there are cases where millisecond granularity is not
enough and can significantly reduce throughput, e.g. at 1 Mbps a
1 ms timeout already spans roughly 100 byte times.

Updated the 4 drivers which implement the asynchronous API, along
with the places where the API is used.
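
As a rough illustration of the migration for existing callers (a minimal
sketch, not taken from this commit; the device handle and buffers are
placeholders), millisecond values are scaled with USEC_PER_MSEC and
SYS_FOREVER_MS becomes SYS_FOREVER_US:

    #include <zephyr.h>
    #include <drivers/uart.h>

    static uint8_t rx_buf[64];
    static const uint8_t tx_buf[] = "hello";

    static void start_async(const struct device *uart_dev)
    {
            /* Old API: uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50); */
            uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);

            /* Old API: uart_tx(uart_dev, tx_buf, sizeof(tx_buf), SYS_FOREVER_MS); */
            uart_tx(uart_dev, tx_buf, sizeof(tx_buf), SYS_FOREVER_US);
    }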

Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
Krzysztof Chruscinski 2021-10-01 15:47:40 +02:00 committed by Carles Cufí
parent cc69ca1a9b
commit c590b3545a
8 changed files with 64 additions and 58 deletions

@@ -448,7 +448,7 @@ static int uart_nrfx_tx_abort(const struct device *dev)
 		return -EINVAL;
 	}
 #if HW_FLOW_CONTROL_AVAILABLE
-	if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
+	if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
 		k_timer_stop(&uart0_cb.tx_timeout_timer);
 	}
 #endif
@@ -527,7 +527,7 @@ static int uart_nrfx_rx_disable(const struct device *dev)
 	}
 	uart0_cb.rx_enabled = 0;
-	if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
+	if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
 		k_timer_stop(&uart0_cb.rx_timeout_timer);
 	}
 	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);
@@ -599,15 +599,15 @@ static void rx_isr(const struct device *dev)
 		uart0_cb.rx_counter++;
 		if (uart0_cb.rx_timeout == 0) {
 			rx_rdy_evt(dev);
-		} else if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
+		} else if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
 			k_timer_start(&uart0_cb.rx_timeout_timer,
-				      K_MSEC(uart0_cb.rx_timeout),
+				      K_USEC(uart0_cb.rx_timeout),
 				      K_NO_WAIT);
 		}
 	}
 	if (uart0_cb.rx_buffer_length == uart0_cb.rx_counter) {
-		if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
+		if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
 			k_timer_stop(&uart0_cb.rx_timeout_timer);
 		}
 		rx_rdy_evt(dev);
@@ -643,9 +643,9 @@ static void tx_isr(const struct device *dev)
 	if (uart0_cb.tx_counter < uart0_cb.tx_buffer_length &&
 	    !uart0_cb.tx_abort) {
 #if HW_FLOW_CONTROL_AVAILABLE
-		if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
+		if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
 			k_timer_start(&uart0_cb.tx_timeout_timer,
-				      K_MSEC(uart0_cb.tx_timeout),
+				      K_USEC(uart0_cb.tx_timeout),
 				      K_NO_WAIT);
 		}
 #endif
@@ -657,7 +657,7 @@ static void tx_isr(const struct device *dev)
 	} else {
 #if HW_FLOW_CONTROL_AVAILABLE
-		if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
+		if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
 			k_timer_stop(&uart0_cb.tx_timeout_timer);
 		}
 #endif
@@ -686,7 +686,7 @@ static void tx_isr(const struct device *dev)
 static void error_isr(const struct device *dev)
 {
-	if (uart0_cb.rx_timeout != SYS_FOREVER_MS) {
+	if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
 		k_timer_stop(&uart0_cb.rx_timeout_timer);
 	}
 	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
@@ -766,7 +766,7 @@ static void tx_timeout(struct k_timer *timer)
 {
 	struct uart_event evt;
-	if (uart0_cb.tx_timeout != SYS_FOREVER_MS) {
+	if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
 		k_timer_stop(&uart0_cb.tx_timeout_timer);
 	}
 	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

@@ -725,8 +725,8 @@ static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf,
 	irq_unlock(key);
 	if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS
-	    && timeout != SYS_FOREVER_MS) {
-		k_timer_start(&data->async->tx_timeout_timer, K_MSEC(timeout),
+	    && timeout != SYS_FOREVER_US) {
+		k_timer_start(&data->async->tx_timeout_timer, K_USEC(timeout),
 			      K_NO_WAIT);
 	}
 	return 0;
@@ -806,9 +806,15 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
 	}
 	data->async->rx_timeout = timeout;
+	/* Set minimum interval to 3 RTC ticks. 3 is used due to RTC limitation
+	 * which cannot set timeout for next tick. Assuming delay in processing
+	 * 3 instead of 2 is used. Note that lower value would work in a similar
+	 * way but timeouts would always occur later than expected, most likely
+	 * after ~3 ticks.
+	 */
 	data->async->rx_timeout_slab =
 		MAX(timeout / RX_TIMEOUT_DIV,
-		    NRFX_CEIL_DIV(1000, CONFIG_SYS_CLOCK_TICKS_PER_SEC));
+		    NRFX_CEIL_DIV(3 * 1000000, CONFIG_SYS_CLOCK_TICKS_PER_SEC));
 	data->async->rx_buf = buf;
 	data->async->rx_buf_len = len;
@@ -926,7 +932,7 @@ static void tx_timeout(struct k_timer *timer)
 /**
  * Whole timeout is divided by RX_TIMEOUT_DIV into smaller units, rx_timeout
- * is executed periodically every rx_timeout_slab ms. If between executions
+ * is executed periodically every rx_timeout_slab us. If between executions
  * data was received, then we start counting down time from start, if not, then
  * we subtract rx_timeout_slab from rx_timeout_left.
  * If rx_timeout_left is less than rx_timeout_slab it means that receiving has
@@ -996,7 +1002,7 @@ static void rx_timeout(struct k_timer *timer)
 	if (clipped ||
 	    (data->async->rx_timeout_left
 	     < data->async->rx_timeout_slab)) {
-		/* rx_timeout ms elapsed since last receiving */
+		/* rx_timeout us elapsed since last receiving */
 		notify_uart_rx_rdy(dev, len);
 		data->async->rx_offset += len;
 		data->async->rx_total_user_byte_cnt += len;
@@ -1044,11 +1050,11 @@ static void rxstarted_isr(const struct device *dev)
 		.type = UART_RX_BUF_REQUEST,
 	};
 	user_callback(dev, &evt);
-	if (data->async->rx_timeout != SYS_FOREVER_MS) {
+	if (data->async->rx_timeout != SYS_FOREVER_US) {
 		data->async->rx_timeout_left = data->async->rx_timeout;
 		k_timer_start(&data->async->rx_timeout_timer,
-			      K_MSEC(data->async->rx_timeout_slab),
-			      K_MSEC(data->async->rx_timeout_slab));
+			      K_USEC(data->async->rx_timeout_slab),
+			      K_USEC(data->async->rx_timeout_slab));
 	}
 }
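
As a sanity check of the minimum slab interval introduced above (this note is
not part of the diff; the tick rate is an assumption): with the common
CONFIG_SYS_CLOCK_TICKS_PER_SEC value of 32768 on these SoCs,
NRFX_CEIL_DIV(3 * 1000000, 32768) = 92, so rx_timeout_slab is never shorter
than roughly 92 us (three RTC ticks), while longer requested timeouts are
still split into RX_TIMEOUT_DIV periodic checks as before.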

@@ -283,7 +283,7 @@ static void uart_sam0_dma_rx_done(const struct device *dma_dev, void *arg,
 	 * reception. This also catches the case of DMA completion during
 	 * timeout handling.
 	 */
-	if (dev_data->rx_timeout_time != SYS_FOREVER_MS) {
+	if (dev_data->rx_timeout_time != SYS_FOREVER_US) {
 		dev_data->rx_waiting_for_irq = true;
 		regs->INTENSET.reg = SERCOM_USART_INTENSET_RXC;
 		irq_unlock(key);
@@ -356,7 +356,7 @@ static void uart_sam0_rx_timeout(struct k_work *work)
 	if (dev_data->rx_timeout_from_isr) {
 		dev_data->rx_timeout_from_isr = false;
 		k_work_reschedule(&dev_data->rx_timeout_work,
-				  K_MSEC(dev_data->rx_timeout_chunk));
+				  K_USEC(dev_data->rx_timeout_chunk));
 		irq_unlock(key);
 		return;
 	}
@@ -378,7 +378,7 @@ static void uart_sam0_rx_timeout(struct k_work *work)
 					 dev_data->rx_timeout_chunk);
 		k_work_reschedule(&dev_data->rx_timeout_work,
-				  K_MSEC(remaining));
+				  K_USEC(remaining));
 	}
 	irq_unlock(key);
@@ -755,11 +755,11 @@ static void uart_sam0_isr(const struct device *dev)
 		 * If we have a timeout, restart the time remaining whenever
 		 * we see data.
 		 */
-		if (dev_data->rx_timeout_time != SYS_FOREVER_MS) {
+		if (dev_data->rx_timeout_time != SYS_FOREVER_US) {
 			dev_data->rx_timeout_from_isr = true;
 			dev_data->rx_timeout_start = k_uptime_get_32();
 			k_work_reschedule(&dev_data->rx_timeout_work,
-					  K_MSEC(dev_data->rx_timeout_chunk));
+					  K_USEC(dev_data->rx_timeout_chunk));
 		}
 		/* DMA will read the currently ready byte out */
@@ -945,9 +945,9 @@ static int uart_sam0_tx(const struct device *dev, const uint8_t *buf,
 		return retval;
 	}
-	if (timeout != SYS_FOREVER_MS) {
+	if (timeout != SYS_FOREVER_US) {
 		k_work_reschedule(&dev_data->tx_timeout_work,
-				  K_MSEC(timeout));
+				  K_USEC(timeout));
 	}
 	return dma_start(cfg->dma_dev, cfg->tx_dma_channel);

@@ -808,10 +808,10 @@ static inline void async_evt_rx_buf_release(struct uart_stm32_data *data)
 static inline void async_timer_start(struct k_work_delayable *work,
 				     int32_t timeout)
 {
-	if ((timeout != SYS_FOREVER_MS) && (timeout != 0)) {
+	if ((timeout != SYS_FOREVER_US) && (timeout != 0)) {
 		/* start timer */
-		LOG_DBG("async timer started for %d ms", timeout);
-		k_work_reschedule(work, K_MSEC(timeout));
+		LOG_DBG("async timer started for %d us", timeout);
+		k_work_reschedule(work, K_USEC(timeout));
 	}
 }

@@ -486,8 +486,8 @@ static inline int uart_callback_set(const struct device *dev,
  * @param dev UART device structure.
  * @param buf Pointer to transmit buffer.
  * @param len Length of transmit buffer.
- * @param timeout Timeout in milliseconds. Valid only if flow control is
- *		  enabled. @ref SYS_FOREVER_MS disables timeout.
+ * @param timeout Timeout in microseconds. Valid only if flow control is
+ *		  enabled. @ref SYS_FOREVER_US disables timeout.
  *
  * @retval -ENOTSUP If not supported.
  * @retval -EBUSY There is already an ongoing transfer.
@@ -550,7 +550,7 @@ static inline int z_impl_uart_tx_abort(const struct device *dev)
  * @param len Buffer length.
  * @param timeout Inactivity period after receiving at least a byte which
  *		  triggers @ref uart_event_type::UART_RX_RDY event. Given in
- *		  milliseconds. @ref SYS_FOREVER_MS disables timeout. See
+ *		  microseconds. @ref SYS_FOREVER_US disables timeout. See
  *		  @ref uart_event_type for details.
  *
  * @retval -ENOTSUP If not supported.
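
The header change above is what makes sub-millisecond inactivity timeouts
expressible at the API level: at 1 Mbps one character takes roughly 10 us on
the wire, so an idle period of a few characters can now be requested directly,
whereas the old smallest non-zero value (1 ms) corresponded to about 100
character times. A minimal caller sketch (illustrative values; uart_dev and
rx_buf are assumed to exist as in the examples earlier in this page):

    /* ~3 character times of RX inactivity at 1 Mbps (about 10 us per byte). */
    #define RX_IDLE_TIMEOUT_US 30

    int err = uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), RX_IDLE_TIMEOUT_US);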

@@ -73,11 +73,11 @@ void test_single_read(void)
 	zassert_not_equal(memcmp(tx_buf, rx_buf, 5), 0,
 			  "Initial buffer check failed");
-	uart_rx_enable(uart_dev, rx_buf, 10, 50);
+	uart_rx_enable(uart_dev, rx_buf, 10, 50 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
 		      "RX_RDY not expected at this point");
-	uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100);
+	uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
@@ -86,7 +86,7 @@ void test_single_read(void)
 	zassert_equal(memcmp(tx_buf, rx_buf, 5), 0, "Buffers not equal");
 	zassert_not_equal(memcmp(tx_buf, rx_buf+5, 5), 0, "Buffers not equal");
-	uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100);
+	uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
 	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)),
@@ -151,14 +151,14 @@ void test_chained_read(void)
 {
 	uint8_t tx_buf[10];
-	uart_rx_enable(uart_dev, chained_read_buf0, 10, 50);
+	uart_rx_enable(uart_dev, chained_read_buf0, 10, 50 * USEC_PER_MSEC);
 	for (int i = 0; i < 6; i++) {
 		zassert_not_equal(k_sem_take(&rx_disabled, K_MSEC(10)),
 				  0,
 				  "RX_DISABLED occurred");
 		snprintf(tx_buf, sizeof(tx_buf), "Message %d", i);
-		uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100);
+		uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
 		zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0,
 			      "TX_DONE timeout");
 		zassert_equal(k_sem_take(&rx_rdy, K_MSEC(1000)), 0,
@@ -217,13 +217,13 @@ void test_double_buffer(void)
 	zassert_equal(uart_rx_enable(uart_dev,
 				     double_buffer[0],
 				     sizeof(double_buffer[0]),
-				     50),
+				     50 * USEC_PER_MSEC),
 		      0,
 		      "Failed to enable receiving");
 	for (int i = 0; i < 100; i++) {
 		snprintf(tx_buf, sizeof(tx_buf), "%03d", i);
-		uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100);
+		uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
 		zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0,
 			      "TX_DONE timeout");
 		zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0,
@@ -285,14 +285,14 @@ void test_read_abort(void)
 	memset(rx_buf, 0, sizeof(rx_buf));
 	memset(tx_buf, 1, sizeof(tx_buf));
-	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50);
-	uart_tx(uart_dev, tx_buf, 5, 100);
+	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);
+	uart_tx(uart_dev, tx_buf, 5, 100 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
 	zassert_equal(memcmp(tx_buf, rx_buf, 5), 0, "Buffers not equal");
-	uart_tx(uart_dev, tx_buf, 95, 100);
+	uart_tx(uart_dev, tx_buf, 95, 100 * USEC_PER_MSEC);
 	/* Wait for at least one character. RX_RDY event will be generated only
 	 * if there is pending data.
@@ -309,7 +309,7 @@ void test_read_abort(void)
 	/* Read out possible other RX bytes
 	 * that may affect following test on RX
 	 */
-	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50);
+	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);
 	while (k_sem_take(&rx_rdy, K_MSEC(1000)) != -EAGAIN)
 		;
 	uart_rx_disable(uart_dev);
@@ -359,14 +359,14 @@ void test_write_abort(void)
 	memset(rx_buf, 0, sizeof(rx_buf));
 	memset(tx_buf, 1, sizeof(tx_buf));
-	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50);
-	uart_tx(uart_dev, tx_buf, 5, 100);
+	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);
+	uart_tx(uart_dev, tx_buf, 5, 100 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
 	zassert_equal(memcmp(tx_buf, rx_buf, 5), 0, "Buffers not equal");
-	uart_tx(uart_dev, tx_buf, 95, 100);
+	uart_tx(uart_dev, tx_buf, 95, 100 * USEC_PER_MSEC);
 	uart_tx_abort(uart_dev);
 	zassert_equal(k_sem_take(&tx_aborted, K_MSEC(100)), 0,
 		      "TX_ABORTED timeout");
@@ -425,16 +425,16 @@ void test_forever_timeout(void)
 	memset(rx_buf, 0, sizeof(rx_buf));
 	memset(tx_buf, 1, sizeof(tx_buf));
-	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), SYS_FOREVER_MS);
-	uart_tx(uart_dev, tx_buf, 5, SYS_FOREVER_MS);
+	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), SYS_FOREVER_US);
+	uart_tx(uart_dev, tx_buf, 5, SYS_FOREVER_US);
 	zassert_not_equal(k_sem_take(&tx_aborted, K_MSEC(1000)), 0,
 			  "TX_ABORTED timeout");
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_not_equal(k_sem_take(&rx_rdy, K_MSEC(1000)), 0,
 			  "RX_RDY timeout");
-	uart_tx(uart_dev, tx_buf, 95, SYS_FOREVER_MS);
+	uart_tx(uart_dev, tx_buf, 95, SYS_FOREVER_US);
 	zassert_not_equal(k_sem_take(&tx_aborted, K_MSEC(1000)), 0,
 			  "TX_ABORTED timeout");
@@ -463,7 +463,7 @@ void test_chained_write_callback(const struct device *uart_dev,
 	switch (evt->type) {
 	case UART_TX_DONE:
 		if (chained_write_next_buf) {
-			uart_tx(uart_dev, chained_write_tx_bufs[1], 10, 100);
+			uart_tx(uart_dev, chained_write_tx_bufs[1], 10, 100 * USEC_PER_MSEC);
 			chained_write_next_buf = false;
 		}
 		tx_sent = 1;
@@ -499,9 +499,9 @@ void test_chained_write(void)
 	memset(rx_buf, 0, sizeof(rx_buf));
-	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50);
-	uart_tx(uart_dev, chained_write_tx_bufs[0], 10, 100);
+	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);
+	uart_tx(uart_dev, chained_write_tx_bufs[0], 10, 100 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
 	zassert_equal(chained_write_next_buf, false, "Sent no message");
@@ -573,9 +573,9 @@ void test_long_buffers(void)
 	memset(long_rx_buf, 0, sizeof(long_rx_buf));
 	memset(long_tx_buf, 1, sizeof(long_tx_buf));
-	uart_rx_enable(uart_dev, long_rx_buf, sizeof(long_rx_buf), 10);
-	uart_tx(uart_dev, long_tx_buf, 500, 200);
+	uart_rx_enable(uart_dev, long_rx_buf, sizeof(long_rx_buf), 10 * USEC_PER_MSEC);
+	uart_tx(uart_dev, long_tx_buf, 500, 200 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(200)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout");
 	zassert_equal(long_received[0], 500, "Wrong number of bytes received.");
@@ -584,7 +584,7 @@ void test_long_buffers(void)
 		      "Buffers not equal");
 	evt_num = 0;
-	uart_tx(uart_dev, long_tx_buf, 1000, 200);
+	uart_tx(uart_dev, long_tx_buf, 1000, 200 * USEC_PER_MSEC);
 	zassert_equal(k_sem_take(&tx_done, K_MSEC(200)), 0, "TX_DONE timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout");
 	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout");

@@ -89,7 +89,7 @@ static void counter_top_handler(const struct device *dev, void *user_data)
 		int err;
 		err = uart_rx_enable(uart_dev, async_rx_buf,
-				     sizeof(async_rx_buf), 1);
+				     sizeof(async_rx_buf), 1 * USEC_PER_MSEC);
 		zassert_true(err >= 0, NULL);
 		async_rx_enabled = true;
 	} else if (int_driven) {
@@ -248,7 +248,7 @@ static void int_async_thread_func(void *p_data, void *base, void *range)
 			buf = &int_async_data->buf[data->cnt & 0xF];
 			data->cnt++;
-			err = uart_tx(uart_dev, buf, 1, 1000);
+			err = uart_tx(uart_dev, buf, 1, 1000 * USEC_PER_MSEC);
 			zassert_true(err >= 0,
 				     "Unexpected err:%d", err);
 		} else {

@@ -83,11 +83,11 @@ static bool async_verify(const struct device *dev, bool active)
 	zassert_equal(err, 0, "Unexpected err: %d", err);
 	if (HAS_RX) {
-		err = uart_rx_enable(dev, rxbuf, sizeof(rxbuf), 1);
+		err = uart_rx_enable(dev, rxbuf, sizeof(rxbuf), 1 * USEC_PER_MSEC);
 		zassert_equal(err, 0, "Unexpected err: %d", err);
 	}
-	err = uart_tx(dev, txbuf, sizeof(txbuf), 10);
+	err = uart_tx(dev, txbuf, sizeof(txbuf), 10 * USEC_PER_MSEC);
 	zassert_equal(err, 0, "Unexpected err: %d", err);
 	k_busy_wait(10000);