2016-05-18 16:49:04 +02:00
|
|
|
/*
|
2018-12-14 16:13:14 +01:00
|
|
|
* Copyright (c) 2016-2019 Nordic Semiconductor ASA
|
2016-05-18 16:49:04 +02:00
|
|
|
*
|
2017-01-19 02:01:01 +01:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
2016-05-18 16:49:04 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Driver for Nordic Semiconductor nRF5X UART
|
|
|
|
*/
|
|
|
|
|
2023-02-21 13:06:33 +01:00
|
|
|
#include <zephyr/drivers/pinctrl.h>
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/drivers/uart.h>
|
|
|
|
#include <zephyr/pm/device.h>
|
2022-10-17 10:24:11 +02:00
|
|
|
#include <zephyr/irq.h>
|
2022-02-23 13:56:38 +01:00
|
|
|
#include <soc.h>
|
2018-05-21 09:09:19 +02:00
|
|
|
#include <hal/nrf_uart.h>
|
2021-09-21 15:07:04 +02:00
|
|
|
|
2020-03-06 23:05:41 +01:00
|
|
|
/*
 * Extract information from devicetree.
 *
 * This driver only supports one instance of this IP block, so the
 * instance number is always 0.
 */
#define DT_DRV_COMPAT nordic_nrf_uart

/* Shorthand accessors for properties of instance 0. */
#define PROP(prop) DT_INST_PROP(0, prop)
#define HAS_PROP(prop) DT_INST_NODE_HAS_PROP(0, prop)

/* Baud rate configured in devicetree (bits per second). */
#define BAUDRATE PROP(current_speed)

/* When set, the instance is TX-only and all RX entry points reject calls. */
#define DISABLE_RX PROP(disable_rx)
/* Whether RTS/CTS hardware flow control pins are wired up for this instance. */
#define HW_FLOW_CONTROL_AVAILABLE PROP(hw_flow_control)

/* Interrupt line and priority for the UART peripheral. */
#define IRQN DT_INST_IRQN(0)
#define IRQ_PRIO DT_INST_IRQ(0, priority)

/* Base address of the single supported UART peripheral instance. */
static NRF_UART_Type *const uart0_addr = (NRF_UART_Type *)DT_INST_REG_ADDR(0);
|
2018-07-11 14:11:18 +02:00
|
|
|
|
2021-09-21 15:07:04 +02:00
|
|
|
/* Per-instance constant configuration (held in ROM). */
struct uart_nrfx_config {
	/* Pin control configuration used to route UART signals to pins. */
	const struct pinctrl_dev_config *pcfg;
};
|
|
|
|
|
2018-11-20 12:24:42 +01:00
|
|
|
/* Device data structure */
struct uart_nrfx_data {
	/* Last configuration applied via uart_nrfx_configure(); returned
	 * as-is by uart_nrfx_config_get().
	 */
	struct uart_config uart_config;
};
|
|
|
|
|
2019-03-22 21:26:00 +01:00
|
|
|
#ifdef CONFIG_UART_0_ASYNC
|
2018-12-14 16:13:14 +01:00
|
|
|
static struct {
|
|
|
|
uart_callback_t callback;
|
|
|
|
void *user_data;
|
|
|
|
|
2020-05-27 18:26:57 +02:00
|
|
|
uint8_t *rx_buffer;
|
|
|
|
uint8_t *rx_secondary_buffer;
|
2018-12-14 16:13:14 +01:00
|
|
|
size_t rx_buffer_length;
|
|
|
|
size_t rx_secondary_buffer_length;
|
|
|
|
volatile size_t rx_counter;
|
|
|
|
volatile size_t rx_offset;
|
2020-05-27 18:26:57 +02:00
|
|
|
int32_t rx_timeout;
|
2020-10-20 13:58:21 +02:00
|
|
|
struct k_timer rx_timeout_timer;
|
2018-12-14 16:13:14 +01:00
|
|
|
bool rx_enabled;
|
|
|
|
|
|
|
|
bool tx_abort;
|
2020-05-27 18:26:57 +02:00
|
|
|
const uint8_t *volatile tx_buffer;
|
2021-10-26 20:07:17 +02:00
|
|
|
/* note: this is aliased with atomic_t in uart_nrfx_poll_out() */
|
|
|
|
unsigned long tx_buffer_length;
|
2018-12-14 16:13:14 +01:00
|
|
|
volatile size_t tx_counter;
|
2020-06-05 15:25:14 +02:00
|
|
|
#if HW_FLOW_CONTROL_AVAILABLE
|
2020-06-11 10:06:53 +02:00
|
|
|
int32_t tx_timeout;
|
2020-10-20 13:58:21 +02:00
|
|
|
struct k_timer tx_timeout_timer;
|
2018-12-14 16:13:14 +01:00
|
|
|
#endif
|
|
|
|
} uart0_cb;
|
2019-03-22 21:26:00 +01:00
|
|
|
#endif /* CONFIG_UART_0_ASYNC */
|
2018-12-14 16:13:14 +01:00
|
|
|
|
2018-07-06 21:59:42 +02:00
|
|
|
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
|
2018-06-28 22:16:39 +02:00
|
|
|
|
2018-07-16 20:12:26 +02:00
|
|
|
static uart_irq_callback_user_data_t irq_callback; /**< Callback function pointer */
|
|
|
|
static void *irq_cb_data; /**< Callback function arg */
|
2018-06-28 22:16:39 +02:00
|
|
|
|
|
|
|
/* Variable used to override the state of the TXDRDY event in the initial state
|
|
|
|
* of the driver. This event is not set by the hardware until a first byte is
|
|
|
|
* sent, and we want to use it as an indication if the transmitter is ready
|
|
|
|
* to accept a new byte.
|
|
|
|
*/
|
2020-05-27 18:26:57 +02:00
|
|
|
static volatile uint8_t uart_sw_event_txdrdy;
|
2020-06-29 09:55:55 +02:00
|
|
|
static volatile bool disable_tx_irq;
|
2018-06-28 22:16:39 +02:00
|
|
|
|
2018-07-06 21:59:42 +02:00
|
|
|
#endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */
|
2016-05-18 16:49:04 +02:00
|
|
|
|
2018-06-28 22:16:39 +02:00
|
|
|
static bool event_txdrdy_check(void)
|
|
|
|
{
|
2018-07-11 14:11:18 +02:00
|
|
|
return (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)
|
2018-07-06 21:59:42 +02:00
|
|
|
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
|
2018-06-28 22:16:39 +02:00
|
|
|
|| uart_sw_event_txdrdy
|
|
|
|
#endif
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mark the transmitter as not-ready: clear the hardware TXDRDY event and,
 * in interrupt-driven mode, the software override flag as well.
 */
static void event_txdrdy_clear(void)
{
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
	uart_sw_event_txdrdy = 0U;
#endif
}
|
|
|
|
|
|
|
|
|
2016-05-18 16:49:04 +02:00
|
|
|
/**
|
|
|
|
* @brief Set the baud rate
|
|
|
|
*
|
|
|
|
* This routine set the given baud rate for the UART.
|
|
|
|
*
|
|
|
|
* @param dev UART device struct
|
|
|
|
* @param baudrate Baud rate
|
|
|
|
*
|
2022-01-07 01:43:40 +01:00
|
|
|
* @retval 0 on success.
|
|
|
|
* @retval -EINVAL for invalid baudrate.
|
2016-05-18 16:49:04 +02:00
|
|
|
*/
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int baudrate_set(const struct device *dev, uint32_t baudrate)
|
2016-05-18 16:49:04 +02:00
|
|
|
{
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_uart_baudrate_t nrf_baudrate; /* calculated baudrate divisor */
|
2016-10-25 14:06:55 +02:00
|
|
|
|
|
|
|
switch (baudrate) {
|
2017-07-19 16:43:11 +02:00
|
|
|
case 300:
|
2018-05-21 09:09:19 +02:00
|
|
|
/* value not supported by Nordic HAL */
|
|
|
|
nrf_baudrate = 0x00014000;
|
2017-07-19 16:43:11 +02:00
|
|
|
break;
|
|
|
|
case 600:
|
2018-05-21 09:09:19 +02:00
|
|
|
/* value not supported by Nordic HAL */
|
|
|
|
nrf_baudrate = 0x00027000;
|
2017-07-19 16:43:11 +02:00
|
|
|
break;
|
2016-10-25 14:06:55 +02:00
|
|
|
case 1200:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_1200;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 2400:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_2400;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 4800:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_4800;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 9600:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_9600;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 14400:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_14400;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 19200:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_19200;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 28800:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_28800;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
2022-03-31 14:57:23 +02:00
|
|
|
#if defined(UART_BAUDRATE_BAUDRATE_Baud31250)
|
2018-07-09 08:34:39 +02:00
|
|
|
case 31250:
|
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_31250;
|
|
|
|
break;
|
2022-03-31 14:57:23 +02:00
|
|
|
#endif
|
2016-10-25 14:06:55 +02:00
|
|
|
case 38400:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_38400;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
2022-03-31 14:57:23 +02:00
|
|
|
#if defined(UART_BAUDRATE_BAUDRATE_Baud56000)
|
2018-07-09 08:34:39 +02:00
|
|
|
case 56000:
|
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_56000;
|
|
|
|
break;
|
2022-03-31 14:57:23 +02:00
|
|
|
#endif
|
2016-10-25 14:06:55 +02:00
|
|
|
case 57600:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_57600;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 76800:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_76800;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 115200:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_115200;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 230400:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_230400;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 250000:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_250000;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 460800:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_460800;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 921600:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_921600;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
case 1000000:
|
2018-05-21 09:09:19 +02:00
|
|
|
nrf_baudrate = NRF_UART_BAUDRATE_1000000;
|
2016-10-25 14:06:55 +02:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2016-05-18 16:49:04 +02:00
|
|
|
|
2018-07-11 14:11:18 +02:00
|
|
|
nrf_uart_baudrate_set(uart0_addr, nrf_baudrate);
|
2016-05-18 16:49:04 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Poll the device for input.
|
|
|
|
*
|
|
|
|
* @param dev UART device struct
|
|
|
|
* @param c Pointer to character
|
|
|
|
*
|
|
|
|
* @return 0 if a character arrived, -1 if the input buffer if empty.
|
|
|
|
*/
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uart_nrfx_poll_in(const struct device *dev, unsigned char *c)
|
2016-05-18 16:49:04 +02:00
|
|
|
{
|
2018-07-11 14:11:18 +02:00
|
|
|
if (!nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) {
|
2016-05-18 16:49:04 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clear the interrupt */
|
2018-07-11 14:11:18 +02:00
|
|
|
nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);
|
2016-05-18 16:49:04 +02:00
|
|
|
|
|
|
|
/* got a character */
|
2018-07-11 14:11:18 +02:00
|
|
|
*c = nrf_uart_rxd_get(uart0_addr);
|
2016-05-18 16:49:04 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-01-29 12:34:55 +01:00
|
|
|
#ifdef CONFIG_UART_0_ASYNC
|
isr: Normalize usage of device instance through ISR
The goal of this patch is to replace the 'void *' parameter by 'struct
device *' if they use such variable or just 'const void *' on all
relevant ISRs
This will avoid not-so-nice const qualifier tweaks when device instances
will be constant.
Note that only the ISR passed to IRQ_CONNECT are of interest here.
In order to do so, the script fix_isr.py below is necessary:
from pathlib import Path
import subprocess
import pickle
import mmap
import sys
import re
import os
cocci_template = """
@r_fix_isr_0
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
(
const struct device *D = (const struct device *)P;
|
const struct device *D = P;
)
...
}
@r_fix_isr_1
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
const struct device *D;
...
(
D = (const struct device *)P;
|
D = P;
)
...
}
@r_fix_isr_2
@
type ret_type;
identifier A;
@@
-ret_type <!fn!>(void *A)
+ret_type <!fn!>(const void *A)
{
...
}
@r_fix_isr_3
@
const struct device *D;
@@
-<!fn!>((void *)D);
+<!fn!>(D);
@r_fix_isr_4
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
(
-const struct device *D = (const struct device *)P;
|
-const struct device *D = P;
)
...
}
@r_fix_isr_5
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
-const struct device *D;
...
(
-D = (const struct device *)P;
|
-D = P;
)
...
}
"""
def find_isr(fn):
db = []
data = None
start = 0
try:
with open(fn, 'r+') as f:
data = str(mmap.mmap(f.fileno(), 0).read())
except Exception as e:
return db
while True:
isr = ""
irq = data.find('IRQ_CONNECT', start)
while irq > -1:
p = 1
arg = 1
p_o = data.find('(', irq)
if p_o < 0:
irq = -1
break;
pos = p_o + 1
while p > 0:
if data[pos] == ')':
p -= 1
elif data[pos] == '(':
p += 1
elif data[pos] == ',' and p == 1:
arg += 1
if arg == 3:
isr += data[pos]
pos += 1
isr = isr.strip(',\\n\\t ')
if isr not in db and len(isr) > 0:
db.append(isr)
start = pos
break
if irq < 0:
break
return db
def patch_isr(fn, isr_list):
if len(isr_list) <= 0:
return
for isr in isr_list:
tmplt = cocci_template.replace('<!fn!>', isr)
with open('/tmp/isr_fix.cocci', 'w') as f:
f.write(tmplt)
cmd = ['spatch', '--sp-file', '/tmp/isr_fix.cocci', '--in-place', fn]
subprocess.run(cmd)
def process_files(path):
if path.is_file() and path.suffix in ['.h', '.c']:
p = str(path.parent) + '/' + path.name
isr_list = find_isr(p)
patch_isr(p, isr_list)
elif path.is_dir():
for p in path.iterdir():
process_files(p)
if len(sys.argv) < 2:
print("You need to provide a dir/file path")
sys.exit(1)
process_files(Path(sys.argv[1]))
And is run: ./fix_isr.py <zephyr root directory>
Finally, some files needed manual fixes such.
Fixes #27399
Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
2020-06-17 14:58:56 +02:00
|
|
|
static void uart_nrfx_isr(const struct device *dev);
|
2020-01-29 12:34:55 +01:00
|
|
|
#endif
|
|
|
|
|
2016-05-18 16:49:04 +02:00
|
|
|
/**
 * @brief Output a character in polled mode.
 *
 * Blocks until the character has been pushed to the wire. Serializes
 * concurrent callers through a spin/sleep lock so bytes are not interleaved.
 *
 * @param dev UART device struct
 * @param c Character to send
 */
static void uart_nrfx_poll_out(const struct device *dev, unsigned char c)
{
	atomic_t *lock;
#ifdef CONFIG_UART_0_ASYNC
	while (uart0_cb.tx_buffer) {
		/* If there is ongoing asynchronous transmission, and we are in
		 * ISR, then call uart interrupt routine, otherwise
		 * busy wait until transmission is finished.
		 */
		if (k_is_in_isr()) {
			uart_nrfx_isr(dev);
		}
	}
	/* Use tx_buffer_length as lock, this way uart_nrfx_tx will
	 * return -EBUSY during poll_out.
	 */
	lock = &uart0_cb.tx_buffer_length;
#else
	/* No async API: use a dedicated static lock word. */
	static atomic_val_t poll_out_lock;

	lock = &poll_out_lock;
#endif

	if (!k_is_in_isr()) {
		/* Bounded wait for the lock: give up (and barge in) after
		 * ~100 ms rather than hanging forever on a stuck holder.
		 */
		uint8_t safety_cnt = 100;

		while (atomic_cas((atomic_t *) lock,
				  (atomic_val_t) 0,
				  (atomic_val_t) 1) == false) {
			if (IS_ENABLED(CONFIG_MULTITHREADING)) {
				/* k_sleep allows other threads to execute and finish
				 * their transactions.
				 */
				k_msleep(1);
			} else {
				k_busy_wait(1000);
			}
			if (--safety_cnt == 0) {
				break;
			}
		}
	} else {
		/* In ISR context we cannot sleep; take the lock outright. */
		*lock = 1;
	}
	/* Reset the transmitter ready state. */
	event_txdrdy_clear();

	/* Activate the transmitter. */
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX);

	/* Send the provided character. */
	nrf_uart_txd_set(uart0_addr, (uint8_t)c);

	/* Wait until the transmitter is ready, i.e. the character is sent. */
	bool res;

	/* NOTE(review): `res` (timeout outcome) is deliberately ignored —
	 * poll_out has no way to report failure to the caller.
	 */
	NRFX_WAIT_FOR(event_txdrdy_check(), 1000, 1, res);

	/* Deactivate the transmitter so that it does not needlessly
	 * consume power.
	 */
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

	/* Release the lock. */
	*lock = 0;
}
|
|
|
|
|
|
|
|
/** Console I/O function */
/* Return and clear the accumulated error flags (overrun/parity/framing/break).
 */
static int uart_nrfx_err_check(const struct device *dev)
{
	/* register bitfields maps to the defines in uart.h */
	return nrf_uart_errorsrc_get_and_clear(uart0_addr);
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Apply a runtime UART configuration: translate the generic uart_config
 * into peripheral settings. Returns -ENOTSUP for any combination the
 * hardware cannot do; on success the config is cached for config_get().
 */
static int uart_nrfx_configure(const struct device *dev,
			       const struct uart_config *cfg)
{
	struct uart_nrfx_data *data = dev->data;
	nrf_uart_config_t uart_cfg;

#if defined(UART_CONFIG_STOP_Msk)
	/* SoC supports configurable stop bits. */
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uart_cfg.stop = NRF_UART_STOP_ONE;
		break;
	case UART_CFG_STOP_BITS_2:
		uart_cfg.stop = NRF_UART_STOP_TWO;
		break;
	default:
		return -ENOTSUP;
	}
#else
	/* Hardware is fixed at one stop bit. */
	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
		return -ENOTSUP;
	}
#endif

	/* The peripheral only supports 8 data bits. */
	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
		return -ENOTSUP;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		uart_cfg.hwfc = NRF_UART_HWFC_DISABLED;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		/* RTS/CTS only if the devicetree wired the pins up. */
		if (HW_FLOW_CONTROL_AVAILABLE) {
			uart_cfg.hwfc = NRF_UART_HWFC_ENABLED;
		} else {
			return -ENOTSUP;
		}
		break;
	default:
		return -ENOTSUP;
	}

#if defined(UART_CONFIG_PARITYTYPE_Msk)
	/* Default parity type; overridden below for odd parity. */
	uart_cfg.paritytype = NRF_UART_PARITYTYPE_EVEN;
#endif
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uart_cfg.parity = NRF_UART_PARITY_EXCLUDED;
		break;
	case UART_CFG_PARITY_EVEN:
		uart_cfg.parity = NRF_UART_PARITY_INCLUDED;
		break;
#if defined(UART_CONFIG_PARITYTYPE_Msk)
	/* Odd parity requires the configurable parity-type field. */
	case UART_CFG_PARITY_ODD:
		uart_cfg.parity = NRF_UART_PARITY_INCLUDED;
		uart_cfg.paritytype = NRF_UART_PARITYTYPE_ODD;
		break;
#endif
	default:
		return -ENOTSUP;
	}

	if (baudrate_set(dev, cfg->baudrate) != 0) {
		return -ENOTSUP;
	}

	nrf_uart_configure(uart0_addr, &uart_cfg);

	/* Cache for uart_nrfx_config_get(). */
	data->uart_config = *cfg;

	return 0;
}
|
|
|
|
|
2021-05-26 21:33:37 +02:00
|
|
|
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uart_nrfx_config_get(const struct device *dev,
|
|
|
|
struct uart_config *cfg)
|
2018-11-20 12:24:42 +01:00
|
|
|
{
|
2022-01-19 16:16:23 +01:00
|
|
|
struct uart_nrfx_data *data = dev->data;
|
|
|
|
|
|
|
|
*cfg = data->uart_config;
|
2018-11-20 12:24:42 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2021-05-26 21:33:37 +02:00
|
|
|
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
|
2018-12-14 16:13:14 +01:00
|
|
|
|
2019-03-22 21:26:00 +01:00
|
|
|
#ifdef CONFIG_UART_0_ASYNC
|
2018-12-14 16:13:14 +01:00
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void user_callback(const struct device *dev, struct uart_event *event)
|
2018-12-14 16:13:14 +01:00
|
|
|
{
|
|
|
|
if (uart0_cb.callback) {
|
2020-06-24 14:28:05 +02:00
|
|
|
uart0_cb.callback(dev, event, uart0_cb.user_data);
|
2018-12-14 16:13:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Register the application callback for async events. When exclusive API
 * callbacks are enabled, registering an async callback clears any
 * interrupt-driven callback so only one API is active at a time.
 */
static int uart_nrfx_callback_set(const struct device *dev,
				  uart_callback_t callback,
				  void *user_data)
{
	uart0_cb.callback = callback;
	uart0_cb.user_data = user_data;

#if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) && defined(CONFIG_UART_0_INTERRUPT_DRIVEN)
	irq_callback = NULL;
	irq_cb_data = NULL;
#endif

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Start an asynchronous transmission of `len` bytes from `buf`.
 * Returns -EBUSY if a transmission (or poll_out) is already in progress.
 * `timeout` (us) is only honored when hardware flow control is available.
 */
static int uart_nrfx_tx(const struct device *dev, const uint8_t *buf,
			size_t len,
			int32_t timeout)
{
	/* tx_buffer_length doubles as the TX lock: claiming it with CAS
	 * makes poll_out and a second tx() call back off with -EBUSY.
	 */
	if (atomic_cas((atomic_t *) &uart0_cb.tx_buffer_length,
		       (atomic_val_t) 0,
		       (atomic_val_t) len) == false) {
		return -EBUSY;
	}

	uart0_cb.tx_buffer = buf;
#if HW_FLOW_CONTROL_AVAILABLE
	uart0_cb.tx_timeout = timeout;
#endif
	/* Arm the transmitter, then enable TXDRDY so the ISR feeds the
	 * remaining bytes.
	 */
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX);
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);

	/* Push the first byte; the ISR takes over from here. */
	uint8_t txd = uart0_cb.tx_buffer[uart0_cb.tx_counter];

	nrf_uart_txd_set(uart0_addr, txd);

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Abort an ongoing asynchronous transmission. Reports UART_TX_ABORTED with
 * the number of bytes already sent. Returns -EINVAL if no TX is active.
 */
static int uart_nrfx_tx_abort(const struct device *dev)
{
	if (uart0_cb.tx_buffer_length == 0) {
		return -EINVAL;
	}
#if HW_FLOW_CONTROL_AVAILABLE
	if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.tx_timeout_timer);
	}
#endif
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

	struct uart_event evt = {
		.type = UART_TX_ABORTED,
		.data.tx.buf = uart0_cb.tx_buffer,
		.data.tx.len = uart0_cb.tx_counter
	};

	/* Release the TX lock before notifying the application, so the
	 * callback may immediately start a new transfer.
	 */
	uart0_cb.tx_buffer_length = 0;
	uart0_cb.tx_counter = 0;

	user_callback(dev, &evt);

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Enable asynchronous reception into `buf` (capacity `len`).
 * `timeout` (us) controls when partial data is reported via UART_RX_RDY;
 * SYS_FOREVER_US disables the inactivity timer. Returns -ENOTSUP on a
 * TX-only instance, -EBUSY if reception is already active.
 */
static int uart_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
			       size_t len,
			       int32_t timeout)
{
	if (DISABLE_RX) {
		__ASSERT(false, "TX only UART instance");
		return -ENOTSUP;
	}

	if (uart0_cb.rx_buffer_length != 0) {
		return -EBUSY;
	}

	uart0_cb.rx_enabled = 1;
	uart0_cb.rx_buffer = buf;
	uart0_cb.rx_buffer_length = len;
	uart0_cb.rx_counter = 0;
	uart0_cb.rx_secondary_buffer_length = 0;
	uart0_cb.rx_timeout = timeout;

	/* Clear stale events before starting, then enable the RX-related
	 * interrupts so rx_isr() runs on each received byte or error.
	 */
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXTO);
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX);
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_RXDRDY |
				       NRF_UART_INT_MASK_ERROR |
				       NRF_UART_INT_MASK_RXTO);

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Provide the next RX buffer in response to UART_RX_BUF_REQUEST.
 * Returns -EACCES if reception is not enabled, -EBUSY if a secondary
 * buffer is already queued.
 */
static int uart_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
				size_t len)
{
	int err;
	/* Interrupts locked: rx_isr() also reads/writes the secondary
	 * buffer fields and rx_enabled.
	 */
	unsigned int key = irq_lock();

	if (!uart0_cb.rx_enabled) {
		err = -EACCES;
	} else if (uart0_cb.rx_secondary_buffer_length != 0) {
		err = -EBUSY;
	} else {
		uart0_cb.rx_secondary_buffer = buf;
		uart0_cb.rx_secondary_buffer_length = len;
		err = 0;
	}

	irq_unlock(key);

	return err;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Stop asynchronous reception. The RXTO interrupt path (outside this view)
 * is expected to complete the shutdown and report remaining data.
 * Returns -EFAULT when reception is not active.
 */
static int uart_nrfx_rx_disable(const struct device *dev)
{
	if (uart0_cb.rx_buffer_length == 0) {
		return -EFAULT;
	}

	uart0_cb.rx_enabled = 0;
	if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.rx_timeout_timer);
	}
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);

	return 0;
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void rx_rdy_evt(const struct device *dev)
|
2018-12-14 16:13:14 +01:00
|
|
|
{
|
|
|
|
struct uart_event event;
|
|
|
|
size_t rx_cnt = uart0_cb.rx_counter;
|
|
|
|
|
|
|
|
event.type = UART_RX_RDY;
|
|
|
|
event.data.rx.buf = uart0_cb.rx_buffer;
|
|
|
|
event.data.rx.len = rx_cnt - uart0_cb.rx_offset;
|
|
|
|
event.data.rx.offset = uart0_cb.rx_offset;
|
|
|
|
|
|
|
|
uart0_cb.rx_offset = rx_cnt;
|
|
|
|
|
2020-06-24 14:28:05 +02:00
|
|
|
user_callback(dev, &event);
|
2018-12-14 16:13:14 +01:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void buf_released_evt(const struct device *dev)
|
2018-12-14 16:13:14 +01:00
|
|
|
{
|
|
|
|
struct uart_event event = {
|
|
|
|
.type = UART_RX_BUF_RELEASED,
|
|
|
|
.data.rx_buf.buf = uart0_cb.rx_buffer
|
|
|
|
};
|
2020-06-24 14:28:05 +02:00
|
|
|
user_callback(dev, &event);
|
2018-12-14 16:13:14 +01:00
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void rx_disabled_evt(const struct device *dev)
|
2018-12-14 16:13:14 +01:00
|
|
|
{
|
|
|
|
struct uart_event event = {
|
|
|
|
.type = UART_RX_DISABLED
|
|
|
|
};
|
2020-06-24 14:28:05 +02:00
|
|
|
user_callback(dev, &event);
|
2018-12-14 16:13:14 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void rx_reset_state(void)
|
|
|
|
{
|
|
|
|
nrf_uart_int_disable(uart0_addr,
|
|
|
|
NRF_UART_INT_MASK_RXDRDY |
|
|
|
|
NRF_UART_INT_MASK_ERROR |
|
|
|
|
NRF_UART_INT_MASK_RXTO);
|
|
|
|
uart0_cb.rx_buffer_length = 0;
|
|
|
|
uart0_cb.rx_enabled = 0;
|
|
|
|
uart0_cb.rx_counter = 0;
|
|
|
|
uart0_cb.rx_offset = 0;
|
|
|
|
uart0_cb.rx_secondary_buffer_length = 0;
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* RXDRDY interrupt handler: store the received byte, drive timeout-based
 * RX_RDY reporting, and swap to the secondary buffer (or shut reception
 * down) when the active buffer fills up.
 */
static void rx_isr(const struct device *dev)
{
	struct uart_event event;

	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);

	if (!uart0_cb.rx_buffer_length || !uart0_cb.rx_enabled) {
		/* Byte received when receiving is disabled - data lost. */
		nrf_uart_rxd_get(uart0_addr);
	} else {
		/* First byte into a fresh buffer with no spare queued:
		 * ask the application for the next buffer early.
		 */
		if (uart0_cb.rx_counter == 0 &&
		    uart0_cb.rx_secondary_buffer_length == 0) {
			event.type = UART_RX_BUF_REQUEST;
			user_callback(dev, &event);
		}
		uart0_cb.rx_buffer[uart0_cb.rx_counter] =
			nrf_uart_rxd_get(uart0_addr);
		uart0_cb.rx_counter++;
		if (uart0_cb.rx_timeout == 0) {
			/* Zero timeout: report every byte immediately. */
			rx_rdy_evt(dev);
		} else if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
			/* (Re)arm the inactivity timer on each byte. */
			k_timer_start(&uart0_cb.rx_timeout_timer,
				      K_USEC(uart0_cb.rx_timeout),
				      K_NO_WAIT);
		}
	}

	if (uart0_cb.rx_buffer_length == uart0_cb.rx_counter) {
		/* Active buffer is full: flush pending data to the app. */
		if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
			k_timer_stop(&uart0_cb.rx_timeout_timer);
		}
		rx_rdy_evt(dev);

		/* Guard against rx_buf_rsp() racing in a new secondary
		 * buffer between the check and the rx_enabled update.
		 */
		unsigned int key = irq_lock();

		if (uart0_cb.rx_secondary_buffer_length == 0) {
			uart0_cb.rx_enabled = 0;
		}
		irq_unlock(key);

		if (uart0_cb.rx_secondary_buffer_length) {
			buf_released_evt(dev);
			/* Switch to secondary buffer. */
			uart0_cb.rx_buffer_length =
				uart0_cb.rx_secondary_buffer_length;
			uart0_cb.rx_buffer = uart0_cb.rx_secondary_buffer;
			uart0_cb.rx_secondary_buffer_length = 0;
			uart0_cb.rx_counter = 0;
			uart0_cb.rx_offset = 0;

			/* Immediately request the next spare buffer. */
			event.type = UART_RX_BUF_REQUEST;
			user_callback(dev, &event);
		} else {
			/* No spare buffer supplied: stop reception. */
			uart_nrfx_rx_disable(dev);
		}
	}
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/*
 * TXDRDY interrupt handler for the asynchronous API.
 *
 * Advances through the TX buffer one byte per interrupt. While bytes
 * remain (and no abort was requested) the next byte is written to TXD;
 * when the transfer completes or is aborted, the transmitter is stopped,
 * the TXDRDY interrupt is disabled and a UART_TX_DONE event is reported.
 */
static void tx_isr(const struct device *dev)
{
	uart0_cb.tx_counter++;
	if (uart0_cb.tx_counter < uart0_cb.tx_buffer_length &&
	    !uart0_cb.tx_abort) {
#if HW_FLOW_CONTROL_AVAILABLE
		/* Re-arm the flow-control timeout for the next byte. */
		if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
			k_timer_start(&uart0_cb.tx_timeout_timer,
				      K_USEC(uart0_cb.tx_timeout),
				      K_NO_WAIT);
		}
#endif
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);

		uint8_t txd = uart0_cb.tx_buffer[uart0_cb.tx_counter];

		nrf_uart_txd_set(uart0_addr, txd);
	} else {
#if HW_FLOW_CONTROL_AVAILABLE

		if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
			k_timer_stop(&uart0_cb.tx_timeout_timer);
		}
#endif
		/* Transfer finished (or aborted): stop the transmitter so
		 * it does not consume power needlessly.
		 */
		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
		/* Capture the result before the bookkeeping is reset. */
		struct uart_event event = {
			.type = UART_TX_DONE,
			.data.tx.buf = uart0_cb.tx_buffer,
			.data.tx.len = uart0_cb.tx_counter
		};
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_TXDRDY);
		uart0_cb.tx_buffer_length = 0;
		uart0_cb.tx_counter = 0;
		uart0_cb.tx_buffer = NULL;

		nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);
		user_callback(dev, &event);
	}
}
|
|
|
|
|
|
|
|
/*
 * Map a hardware ERRORSRC bit mask to the corresponding UART API error
 * code; overrun takes precedence over parity, framing and break.
 *
 * NOTE: the argument is expanded (and therefore evaluated) up to four
 * times, so callers must pass a side-effect-free expression — read
 * volatile sources such as nrf_uart_errorsrc_get_and_clear() into a
 * local variable first. The parameter is parenthesized at every use so
 * that compound expressions bind correctly against the `&` operator.
 */
#define UART_ERROR_FROM_MASK(mask)					  \
	((mask) & NRF_UART_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN	  \
	 : (mask) & NRF_UART_ERROR_PARITY_MASK ? UART_ERROR_PARITY	  \
	 : (mask) & NRF_UART_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING	  \
	 : (mask) & NRF_UART_ERROR_BREAK_MASK ? UART_BREAK		  \
	 : 0)
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
static void error_isr(const struct device *dev)
|
2018-12-14 16:13:14 +01:00
|
|
|
{
|
2021-10-01 15:47:40 +02:00
|
|
|
if (uart0_cb.rx_timeout != SYS_FOREVER_US) {
|
2020-10-20 13:58:21 +02:00
|
|
|
k_timer_stop(&uart0_cb.rx_timeout_timer);
|
2020-01-24 16:04:16 +01:00
|
|
|
}
|
2018-12-14 16:13:14 +01:00
|
|
|
nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
|
|
|
|
|
|
|
|
if (!uart0_cb.rx_enabled) {
|
|
|
|
nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPRX);
|
|
|
|
}
|
|
|
|
struct uart_event event = {
|
|
|
|
.type = UART_RX_STOPPED,
|
|
|
|
.data.rx_stop.reason =
|
|
|
|
UART_ERROR_FROM_MASK(
|
|
|
|
nrf_uart_errorsrc_get_and_clear(uart0_addr)),
|
|
|
|
.data.rx_stop.data.len = uart0_cb.rx_counter
|
|
|
|
- uart0_cb.rx_offset,
|
|
|
|
.data.rx_stop.data.offset = uart0_cb.rx_offset,
|
|
|
|
.data.rx_stop.data.buf = uart0_cb.rx_buffer
|
|
|
|
};
|
|
|
|
|
2020-06-24 14:28:05 +02:00
|
|
|
user_callback(dev, &event);
|
2018-12-14 16:13:14 +01:00
|
|
|
/* Abort transfer. */
|
|
|
|
uart_nrfx_rx_disable(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * In nRF hardware RX timeout can occur only after stopping the peripheral,
 * it is used as a sign that peripheral has finished its operation and is
 * disabled.
 */
static void rxto_isr(const struct device *dev)
{
	nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXTO);

	/* Send rxrdy if there is any data pending. */
	if (uart0_cb.rx_counter - uart0_cb.rx_offset) {
		rx_rdy_evt(dev);
	}

	/* Release the active buffer; if a secondary buffer was queued,
	 * make it current and release it as well.
	 */
	buf_released_evt(dev);
	if (uart0_cb.rx_secondary_buffer_length) {
		uart0_cb.rx_buffer = uart0_cb.rx_secondary_buffer;
		buf_released_evt(dev);
	}

	/* Reset bookkeeping and tell the application RX is fully off. */
	rx_reset_state();
	rx_disabled_evt(dev);
}
|
|
|
|
|
isr: Normalize usage of device instance through ISR
The goal of this patch is to replace the 'void *' parameter by 'struct
device *' if they use such variable or just 'const void *' on all
relevant ISRs
This will avoid not-so-nice const qualifier tweaks when device instances
will be constant.
Note that only the ISR passed to IRQ_CONNECT are of interest here.
In order to do so, the script fix_isr.py below is necessary:
from pathlib import Path
import subprocess
import pickle
import mmap
import sys
import re
import os
cocci_template = """
@r_fix_isr_0
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
(
const struct device *D = (const struct device *)P;
|
const struct device *D = P;
)
...
}
@r_fix_isr_1
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
const struct device *D;
...
(
D = (const struct device *)P;
|
D = P;
)
...
}
@r_fix_isr_2
@
type ret_type;
identifier A;
@@
-ret_type <!fn!>(void *A)
+ret_type <!fn!>(const void *A)
{
...
}
@r_fix_isr_3
@
const struct device *D;
@@
-<!fn!>((void *)D);
+<!fn!>(D);
@r_fix_isr_4
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
(
-const struct device *D = (const struct device *)P;
|
-const struct device *D = P;
)
...
}
@r_fix_isr_5
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
-const struct device *D;
...
(
-D = (const struct device *)P;
|
-D = P;
)
...
}
"""
def find_isr(fn):
db = []
data = None
start = 0
try:
with open(fn, 'r+') as f:
data = str(mmap.mmap(f.fileno(), 0).read())
except Exception as e:
return db
while True:
isr = ""
irq = data.find('IRQ_CONNECT', start)
while irq > -1:
p = 1
arg = 1
p_o = data.find('(', irq)
if p_o < 0:
irq = -1
break;
pos = p_o + 1
while p > 0:
if data[pos] == ')':
p -= 1
elif data[pos] == '(':
p += 1
elif data[pos] == ',' and p == 1:
arg += 1
if arg == 3:
isr += data[pos]
pos += 1
isr = isr.strip(',\\n\\t ')
if isr not in db and len(isr) > 0:
db.append(isr)
start = pos
break
if irq < 0:
break
return db
def patch_isr(fn, isr_list):
if len(isr_list) <= 0:
return
for isr in isr_list:
tmplt = cocci_template.replace('<!fn!>', isr)
with open('/tmp/isr_fix.cocci', 'w') as f:
f.write(tmplt)
cmd = ['spatch', '--sp-file', '/tmp/isr_fix.cocci', '--in-place', fn]
subprocess.run(cmd)
def process_files(path):
if path.is_file() and path.suffix in ['.h', '.c']:
p = str(path.parent) + '/' + path.name
isr_list = find_isr(p)
patch_isr(p, isr_list)
elif path.is_dir():
for p in path.iterdir():
process_files(p)
if len(sys.argv) < 2:
print("You need to provide a dir/file path")
sys.exit(1)
process_files(Path(sys.argv[1]))
And is run: ./fix_isr.py <zephyr root directory>
Finally, some files needed manual fixes such.
Fixes #27399
Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
2020-06-17 14:58:56 +02:00
|
|
|
/*
 * Top-level UART interrupt handler for the asynchronous API.
 *
 * Dispatches to the dedicated handler for each event that is both raised
 * and enabled. ERROR takes priority over RXDRDY (else-if), while TXDRDY
 * and RXTO are checked independently so they are not starved by RX
 * activity in the same interrupt.
 */
void uart_nrfx_isr(const struct device *uart)
{
	if (nrf_uart_int_enable_check(uart0_addr, NRF_UART_INT_MASK_ERROR) &&
	    nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_ERROR)) {
		error_isr(uart);
	} else if (nrf_uart_int_enable_check(uart0_addr,
					     NRF_UART_INT_MASK_RXDRDY) &&
		   nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) {
		rx_isr(uart);
	}

	if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)
	    && nrf_uart_int_enable_check(uart0_addr,
					 NRF_UART_INT_MASK_TXDRDY)) {
		tx_isr(uart);
	}

	if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXTO)) {
		rxto_isr(uart);
	}
}
|
|
|
|
|
2020-10-20 13:58:21 +02:00
|
|
|
/* RX inactivity timer expiry: flush whatever has been received so far to
 * the application as a UART_RX_RDY event.
 */
static void rx_timeout(struct k_timer *timer)
{
	rx_rdy_evt(DEVICE_DT_INST_GET(0));
}
|
2020-01-24 16:04:16 +01:00
|
|
|
|
2020-06-05 15:25:14 +02:00
|
|
|
#if HW_FLOW_CONTROL_AVAILABLE
|
2020-10-20 13:58:21 +02:00
|
|
|
/* TX timeout expiry (flow-control builds only): a byte could not be sent
 * within tx_timeout. Stop the transmitter and report UART_TX_ABORTED with
 * the originally requested buffer and length.
 */
static void tx_timeout(struct k_timer *timer)
{
	struct uart_event evt;

	if (uart0_cb.tx_timeout != SYS_FOREVER_US) {
		k_timer_stop(&uart0_cb.tx_timeout_timer);
	}
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);
	/* Capture event data before resetting the TX bookkeeping. */
	evt.type = UART_TX_ABORTED;
	evt.data.tx.buf = uart0_cb.tx_buffer;
	evt.data.tx.len = uart0_cb.tx_buffer_length;
	uart0_cb.tx_buffer_length = 0;
	uart0_cb.tx_counter = 0;
	user_callback(DEVICE_DT_INST_GET(0), &evt);
}
|
|
|
|
#endif
|
|
|
|
|
2019-03-22 21:26:00 +01:00
|
|
|
#endif /* CONFIG_UART_0_ASYNC */
|
2018-12-14 16:13:14 +01:00
|
|
|
|
|
|
|
|
2018-07-06 21:59:42 +02:00
|
|
|
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
|
2016-05-18 16:49:04 +02:00
|
|
|
|
|
|
|
/** Interrupt driven FIFO fill function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uart_nrfx_fifo_fill(const struct device *dev,
|
2020-05-27 18:26:57 +02:00
|
|
|
const uint8_t *tx_data,
|
2018-05-21 09:09:19 +02:00
|
|
|
int len)
|
2016-05-18 16:49:04 +02:00
|
|
|
{
|
2020-05-27 18:26:57 +02:00
|
|
|
uint8_t num_tx = 0U;
|
2016-05-18 16:49:04 +02:00
|
|
|
|
2018-05-21 09:09:19 +02:00
|
|
|
while ((len - num_tx > 0) &&
|
2018-06-28 22:16:39 +02:00
|
|
|
event_txdrdy_check()) {
|
|
|
|
|
2016-05-18 16:49:04 +02:00
|
|
|
/* Clear the interrupt */
|
2018-06-28 22:16:39 +02:00
|
|
|
event_txdrdy_clear();
|
2016-05-18 16:49:04 +02:00
|
|
|
|
|
|
|
/* Send a character */
|
2020-05-27 18:26:57 +02:00
|
|
|
nrf_uart_txd_set(uart0_addr, (uint8_t)tx_data[num_tx++]);
|
2016-05-18 16:49:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return (int)num_tx;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven FIFO read function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uart_nrfx_fifo_read(const struct device *dev,
|
2020-05-27 18:26:57 +02:00
|
|
|
uint8_t *rx_data,
|
2018-05-21 09:09:19 +02:00
|
|
|
const int size)
|
2016-05-18 16:49:04 +02:00
|
|
|
{
|
2020-05-27 18:26:57 +02:00
|
|
|
uint8_t num_rx = 0U;
|
2016-05-18 16:49:04 +02:00
|
|
|
|
2018-05-21 09:09:19 +02:00
|
|
|
while ((size - num_rx > 0) &&
|
2018-07-11 14:11:18 +02:00
|
|
|
nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY)) {
|
2016-05-18 16:49:04 +02:00
|
|
|
/* Clear the interrupt */
|
2018-07-11 14:11:18 +02:00
|
|
|
nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);
|
2016-05-18 16:49:04 +02:00
|
|
|
|
|
|
|
/* Receive a character */
|
2020-05-27 18:26:57 +02:00
|
|
|
rx_data[num_rx++] = (uint8_t)nrf_uart_rxd_get(uart0_addr);
|
2016-05-18 16:49:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return num_rx;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven transfer enabling function.
 *
 * Activates the transmitter, enables the TXDRDY interrupt and — because
 * the hardware only raises TXDRDY after a byte has been sent — forces
 * the very first interrupt from software when needed.
 */
static void uart_nrfx_irq_tx_enable(const struct device *dev)
{
	uint32_t key;

	/* Cancel any pending request to disable the TX interrupt. */
	disable_tx_irq = false;

	/* Indicate that this device started a transaction that should not be
	 * interrupted by putting the SoC into the deep sleep mode.
	 */
	pm_device_busy_set(dev);

	/* Activate the transmitter. */
	nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTTX);

	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);

	/* Critical section is used to avoid any UART related interrupt which
	 * can occur after the if statement and before call of the function
	 * forcing an interrupt.
	 */
	key = irq_lock();
	if (uart_sw_event_txdrdy) {
		/* Due to HW limitation first TXDRDY interrupt shall be
		 * triggered by the software.
		 */
		NVIC_SetPendingIRQ(IRQN);
	}
	irq_unlock(key);
}
|
|
|
|
|
|
|
|
/** Interrupt driven transfer disabling function.
 *
 * Only sets a flag; the actual interrupt disabling is deferred to
 * uart_nrfx_isr() so the byte currently in flight is not cut short.
 */
static void uart_nrfx_irq_tx_disable(const struct device *dev)
{
	/* Disable TX interrupt in uart_nrfx_isr() when transmission is done. */
	disable_tx_irq = true;
}
|
|
|
|
|
|
|
|
/** Interrupt driven receiver enabling function. */
static void uart_nrfx_irq_rx_enable(const struct device *dev)
{
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_RXDRDY);
}
|
|
|
|
|
|
|
|
/** Interrupt driven receiver disabling function. */
static void uart_nrfx_irq_rx_disable(const struct device *dev)
{
	nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_RXDRDY);
}
|
|
|
|
|
|
|
|
/** Interrupt driven transfer empty function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uart_nrfx_irq_tx_ready_complete(const struct device *dev)
|
2016-05-18 16:49:04 +02:00
|
|
|
{
|
2020-06-29 09:55:55 +02:00
|
|
|
/* Signal TX readiness only when the TX interrupt is enabled and there
|
|
|
|
* is no pending request to disable it. Note that this function may get
|
|
|
|
* called after the TX interrupt is requested to be disabled but before
|
|
|
|
* the disabling is actually performed (in the IRQ handler).
|
|
|
|
*/
|
|
|
|
return nrf_uart_int_enable_check(uart0_addr,
|
|
|
|
NRF_UART_INT_MASK_TXDRDY) &&
|
|
|
|
!disable_tx_irq &&
|
|
|
|
event_txdrdy_check();
|
2016-05-18 16:49:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven receiver ready function. */
static int uart_nrfx_irq_rx_ready(const struct device *dev)
{
	/* A byte is ready whenever the RXDRDY event is raised. */
	return nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_RXDRDY);
}
|
|
|
|
|
|
|
|
/** Interrupt driven error enabling function. */
static void uart_nrfx_irq_err_enable(const struct device *dev)
{
	nrf_uart_int_enable(uart0_addr, NRF_UART_INT_MASK_ERROR);
}
|
|
|
|
|
|
|
|
/** Interrupt driven error disabling function. */
static void uart_nrfx_irq_err_disable(const struct device *dev)
{
	nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_ERROR);
}
|
|
|
|
|
|
|
|
/** Interrupt driven pending status function */
|
2020-04-30 20:33:38 +02:00
|
|
|
static int uart_nrfx_irq_is_pending(const struct device *dev)
|
2016-05-18 16:49:04 +02:00
|
|
|
{
|
2018-07-11 14:11:18 +02:00
|
|
|
return ((nrf_uart_int_enable_check(uart0_addr,
|
2018-05-21 09:09:19 +02:00
|
|
|
NRF_UART_INT_MASK_TXDRDY) &&
|
2020-06-29 09:55:55 +02:00
|
|
|
uart_nrfx_irq_tx_ready_complete(dev))
|
2018-05-21 09:09:19 +02:00
|
|
|
||
|
2018-07-11 14:11:18 +02:00
|
|
|
(nrf_uart_int_enable_check(uart0_addr,
|
2018-05-21 09:09:19 +02:00
|
|
|
NRF_UART_INT_MASK_RXDRDY) &&
|
2018-06-28 22:16:39 +02:00
|
|
|
uart_nrfx_irq_rx_ready(dev)));
|
2016-05-18 16:49:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Interrupt driven interrupt update function.
 *
 * Nothing to latch on this hardware; always report that interrupt
 * status is up to date.
 */
static int uart_nrfx_irq_update(const struct device *dev)
{
	return 1;
}
|
|
|
|
|
|
|
|
/** Set the callback function invoked from uart_nrfx_isr(). */
static void uart_nrfx_irq_callback_set(const struct device *dev,
				       uart_irq_callback_user_data_t cb,
				       void *cb_data)
{
	(void)dev;
	irq_callback = cb;
	irq_cb_data = cb_data;

#if defined(CONFIG_UART_0_ASYNC) && defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS)
	/* The interrupt-driven and async APIs are mutually exclusive:
	 * registering an IRQ callback drops any async callback.
	 */
	uart0_cb.callback = NULL;
	uart0_cb.user_data = NULL;
#endif
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Interrupt service routine.
|
|
|
|
*
|
|
|
|
* This simply calls the callback function, if one exists.
|
|
|
|
*
|
|
|
|
* @param arg Argument to ISR.
|
|
|
|
*/
|
isr: Normalize usage of device instance through ISR
The goal of this patch is to replace the 'void *' parameter by 'struct
device *' if they use such variable or just 'const void *' on all
relevant ISRs
This will avoid not-so-nice const qualifier tweaks when device instances
will be constant.
Note that only the ISR passed to IRQ_CONNECT are of interest here.
In order to do so, the script fix_isr.py below is necessary:
from pathlib import Path
import subprocess
import pickle
import mmap
import sys
import re
import os
cocci_template = """
@r_fix_isr_0
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
(
const struct device *D = (const struct device *)P;
|
const struct device *D = P;
)
...
}
@r_fix_isr_1
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
...
const struct device *D;
...
(
D = (const struct device *)P;
|
D = P;
)
...
}
@r_fix_isr_2
@
type ret_type;
identifier A;
@@
-ret_type <!fn!>(void *A)
+ret_type <!fn!>(const void *A)
{
...
}
@r_fix_isr_3
@
const struct device *D;
@@
-<!fn!>((void *)D);
+<!fn!>(D);
@r_fix_isr_4
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
(
-const struct device *D = (const struct device *)P;
|
-const struct device *D = P;
)
...
}
@r_fix_isr_5
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
...
-const struct device *D;
...
(
-D = (const struct device *)P;
|
-D = P;
)
...
}
"""
def find_isr(fn):
db = []
data = None
start = 0
try:
with open(fn, 'r+') as f:
data = str(mmap.mmap(f.fileno(), 0).read())
except Exception as e:
return db
while True:
isr = ""
irq = data.find('IRQ_CONNECT', start)
while irq > -1:
p = 1
arg = 1
p_o = data.find('(', irq)
if p_o < 0:
irq = -1
break;
pos = p_o + 1
while p > 0:
if data[pos] == ')':
p -= 1
elif data[pos] == '(':
p += 1
elif data[pos] == ',' and p == 1:
arg += 1
if arg == 3:
isr += data[pos]
pos += 1
isr = isr.strip(',\\n\\t ')
if isr not in db and len(isr) > 0:
db.append(isr)
start = pos
break
if irq < 0:
break
return db
def patch_isr(fn, isr_list):
if len(isr_list) <= 0:
return
for isr in isr_list:
tmplt = cocci_template.replace('<!fn!>', isr)
with open('/tmp/isr_fix.cocci', 'w') as f:
f.write(tmplt)
cmd = ['spatch', '--sp-file', '/tmp/isr_fix.cocci', '--in-place', fn]
subprocess.run(cmd)
def process_files(path):
if path.is_file() and path.suffix in ['.h', '.c']:
p = str(path.parent) + '/' + path.name
isr_list = find_isr(p)
patch_isr(p, isr_list)
elif path.is_dir():
for p in path.iterdir():
process_files(p)
if len(sys.argv) < 2:
print("You need to provide a dir/file path")
sys.exit(1)
process_files(Path(sys.argv[1]))
And is run: ./fix_isr.py <zephyr root directory>
Finally, some files needed manual fixes such.
Fixes #27399
Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
2020-06-17 14:58:56 +02:00
|
|
|
/*
 * Interrupt service routine for the interrupt-driven API.
 *
 * Handles a deferred TX-interrupt-disable request first; otherwise
 * acknowledges any error event and invokes the user callback, if one
 * was registered.
 */
static void uart_nrfx_isr(const struct device *dev)
{
	if (disable_tx_irq &&
	    nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_TXDRDY)) {
		nrf_uart_int_disable(uart0_addr, NRF_UART_INT_MASK_TXDRDY);

		/* Deactivate the transmitter so that it does not needlessly
		 * consume power.
		 */
		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STOPTX);

		/* The transaction is over. It is okay to enter the deep sleep
		 * mode if needed.
		 */
		pm_device_busy_clear(dev);

		disable_tx_irq = false;

		return;
	}

	/* Acknowledge a pending error event so it does not retrigger. */
	if (nrf_uart_event_check(uart0_addr, NRF_UART_EVENT_ERROR)) {
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_ERROR);
	}

	if (irq_callback) {
		irq_callback(dev, irq_cb_data);
	}
}
|
2018-07-06 21:59:42 +02:00
|
|
|
#endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */
|
2016-05-18 16:49:04 +02:00
|
|
|
|
2018-05-21 09:09:19 +02:00
|
|
|
/**
 * @brief Initialize UART channel
 *
 * This routine is called to reset the chip in a quiescent state.
 * It is assumed that this function is called only once per UART.
 *
 * @param dev UART device struct
 *
 * @return 0 on success, negative errno on pinctrl or configuration failure
 */
static int uart_nrfx_init(const struct device *dev)
{
	const struct uart_nrfx_config *config = dev->config;
	struct uart_nrfx_data *data = dev->data;
	int err;

	/* Start from a known state. */
	nrf_uart_disable(uart0_addr);

	/* Apply the default pin configuration from devicetree. */
	err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		return err;
	}

	/* Set initial configuration */
	err = uart_nrfx_configure(dev, &data->uart_config);
	if (err) {
		return err;
	}

	/* Enable the UART and activate its receiver. With the current API
	 * the receiver needs to be active all the time. The transmitter
	 * will be activated when there is something to send.
	 */
	nrf_uart_enable(uart0_addr);

	if (!DISABLE_RX) {
		nrf_uart_event_clear(uart0_addr, NRF_UART_EVENT_RXDRDY);

		nrf_uart_task_trigger(uart0_addr, NRF_UART_TASK_STARTRX);
	}

#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
	/* Simulate that the TXDRDY event is set, so that the transmitter status
	 * is indicated correctly.
	 */
	uart_sw_event_txdrdy = 1U;
#endif

#if defined(CONFIG_UART_0_ASYNC) || defined(CONFIG_UART_0_INTERRUPT_DRIVEN)

	IRQ_CONNECT(IRQN,
		    IRQ_PRIO,
		    uart_nrfx_isr,
		    DEVICE_DT_INST_GET(0),
		    0);
	irq_enable(IRQN);
#endif

#ifdef CONFIG_UART_0_ASYNC
	/* Timers used by the async API for RX inactivity and (with flow
	 * control) TX progress supervision.
	 */
	k_timer_init(&uart0_cb.rx_timeout_timer, rx_timeout, NULL);
#if HW_FLOW_CONTROL_AVAILABLE
	k_timer_init(&uart0_cb.tx_timeout_timer, tx_timeout, NULL);
#endif
#endif
	return 0;
}
|
|
|
|
|
2018-06-28 22:16:39 +02:00
|
|
|
/* Common function: uart_nrfx_irq_tx_ready_complete is used for two API entries
 * because Nordic hardware does not distinguish between them.
 */
static const struct uart_driver_api uart_nrfx_uart_driver_api = {
#ifdef CONFIG_UART_0_ASYNC
	/* Asynchronous (event-driven) API. */
	.callback_set = uart_nrfx_callback_set,
	.tx = uart_nrfx_tx,
	.tx_abort = uart_nrfx_tx_abort,
	.rx_enable = uart_nrfx_rx_enable,
	.rx_buf_rsp = uart_nrfx_rx_buf_rsp,
	.rx_disable = uart_nrfx_rx_disable,
#endif /* CONFIG_UART_0_ASYNC */
	/* Polling API — always available. */
	.poll_in = uart_nrfx_poll_in,
	.poll_out = uart_nrfx_poll_out,
	.err_check = uart_nrfx_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = uart_nrfx_configure,
	.config_get = uart_nrfx_config_get,
#endif
#ifdef CONFIG_UART_0_INTERRUPT_DRIVEN
	/* Interrupt-driven (FIFO) API. */
	.fifo_fill = uart_nrfx_fifo_fill,
	.fifo_read = uart_nrfx_fifo_read,
	.irq_tx_enable = uart_nrfx_irq_tx_enable,
	.irq_tx_disable = uart_nrfx_irq_tx_disable,
	.irq_tx_ready = uart_nrfx_irq_tx_ready_complete,
	.irq_rx_enable = uart_nrfx_irq_rx_enable,
	.irq_rx_disable = uart_nrfx_irq_rx_disable,
	.irq_tx_complete = uart_nrfx_irq_tx_ready_complete,
	.irq_rx_ready = uart_nrfx_irq_rx_ready,
	.irq_err_enable = uart_nrfx_irq_err_enable,
	.irq_err_disable = uart_nrfx_irq_err_disable,
	.irq_is_pending = uart_nrfx_irq_is_pending,
	.irq_update = uart_nrfx_irq_update,
	.irq_callback_set = uart_nrfx_irq_callback_set,
#endif /* CONFIG_UART_0_INTERRUPT_DRIVEN */
};
|
|
|
|
|
2020-09-02 00:31:40 +02:00
|
|
|
#ifdef CONFIG_PM_DEVICE
|
2021-11-02 16:19:41 +01:00
|
|
|
/*
 * Power-management action handler.
 *
 * RESUME: reapply the default pin state (if GPIO management is enabled),
 * re-enable the UART and restart the receiver. SUSPEND: disable the UART
 * and switch the pins to their sleep state.
 *
 * @return 0 on success, negative errno from pinctrl, -ENOTSUP for
 *         unsupported actions.
 */
static int uart_nrfx_pm_action(const struct device *dev,
			       enum pm_device_action action)
{
	const struct uart_nrfx_config *config = dev->config;
	int ret;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		if (IS_ENABLED(CONFIG_UART_0_GPIO_MANAGEMENT)) {
			ret = pinctrl_apply_state(config->pcfg,
						  PINCTRL_STATE_DEFAULT);
			if (ret < 0) {
				return ret;
			}
		}

		nrf_uart_enable(uart0_addr);
		if (!DISABLE_RX) {
			nrf_uart_task_trigger(uart0_addr,
					      NRF_UART_TASK_STARTRX);
		}
		break;
	case PM_DEVICE_ACTION_SUSPEND:
		nrf_uart_disable(uart0_addr);

		if (IS_ENABLED(CONFIG_UART_0_GPIO_MANAGEMENT)) {
			ret = pinctrl_apply_state(config->pcfg,
						  PINCTRL_STATE_SLEEP);
			if (ret < 0) {
				return ret;
			}
		}
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
|
2020-09-02 00:31:40 +02:00
|
|
|
#endif /* CONFIG_PM_DEVICE */
|
2018-07-25 15:42:29 +02:00
|
|
|
|
2021-09-21 15:07:04 +02:00
|
|
|
PINCTRL_DT_INST_DEFINE(0);
|
|
|
|
|
2023-02-21 14:06:12 +01:00
|
|
|
NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(DT_DRV_INST(0));
|
2022-02-23 13:56:38 +01:00
|
|
|
|
2021-09-21 15:07:04 +02:00
|
|
|
/* Static per-instance configuration: pin control state from devicetree. */
static const struct uart_nrfx_config uart_nrfx_uart0_config = {
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
};
|
|
|
|
|
2018-11-20 12:24:42 +01:00
|
|
|
/* Mutable per-instance data holding the initial UART configuration,
 * derived from devicetree properties and Kconfig options.
 */
static struct uart_nrfx_data uart_nrfx_uart0_data = {
	.uart_config = {
		.stop_bits = UART_CFG_STOP_BITS_1,
		.data_bits = UART_CFG_DATA_BITS_8,
		.baudrate = BAUDRATE,
#ifdef CONFIG_UART_0_NRF_PARITY_BIT
		.parity = UART_CFG_PARITY_EVEN,
#else
		.parity = UART_CFG_PARITY_NONE,
#endif /* CONFIG_UART_0_NRF_PARITY_BIT */
		.flow_ctrl = PROP(hw_flow_control) ?
			UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE,
	}
};
|
|
|
|
|
2021-10-13 12:25:06 +02:00
|
|
|
PM_DEVICE_DT_INST_DEFINE(0, uart_nrfx_pm_action);
|
|
|
|
|
2020-12-02 18:28:51 +01:00
|
|
|
/* Register the single UART0 device instance. */
DEVICE_DT_INST_DEFINE(0,
	      uart_nrfx_init,
	      PM_DEVICE_DT_INST_GET(0),
	      &uart_nrfx_uart0_data,
	      &uart_nrfx_uart0_config,
	      /* Initialize UART device before UART console. */
	      PRE_KERNEL_1,
	      CONFIG_SERIAL_INIT_PRIORITY,
	      &uart_nrfx_uart_driver_api);
|