drivers: clock_control: nrf: reimplementation including API updates

Reimplementation of the clock control driver for the nRF platform. It
includes the latest API changes: asynchronous clock starting and
querying of the clock status.

Additionally, it implements a calibration algorithm for the LFCLK RC
oscillator which can skip a calibration cycle when the temperature has
not changed since the previous one. The internal temperature sensor is
used for that.

Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
Krzysztof Chruscinski 2019-06-26 13:27:31 +02:00 committed by Carles Cufí
parent a5e4d36a8d
commit 6700f2f194
8 changed files with 771 additions and 428 deletions
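
As a quick illustration, this is roughly how a consumer could use the new
asynchronous API exposed by the reworked driver. The callback and function
names below are made up for the example; the device label suffix and the
clock_control_async_on()/clock_control_get_status() calls come from the
updated clock_control API that this driver implements.

#include <device.h>
#include <drivers/clock_control.h>
#include <drivers/clock_control/nrf_clock_control.h>

/* Illustrative callback, invoked from the clock ISR once LFCLK is running. */
static void lfclk_ready(struct device *dev, void *user_data)
{
	/* Clock is on; start whatever needed it. */
}

static struct clock_control_async_data lfclk_req = {
	.cb = lfclk_ready,
};

void request_lfclk(void)
{
	struct device *clk =
		device_get_binding(DT_INST_0_NORDIC_NRF_CLOCK_LABEL "_32K");

	/* Non-blocking start request; the callback fires when the clock starts. */
	clock_control_async_on(clk, NULL, &lfclk_req);

	/* The clock status can also be polled at any time. */
	if (clock_control_get_status(clk, NULL) == CLOCK_CONTROL_STATUS_ON) {
		/* Already running. */
	}
}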

@@ -117,6 +117,7 @@
/drivers/bluetooth/ @joerchan @jhedberg @Vudentz
/drivers/can/ @alexanderwachter
/drivers/can/*mcp2515* @karstenkoenig
/drivers/clock_control/*nrf* @nordic-krch
/drivers/counter/ @nordic-krch
/drivers/counter/counter_cmos.c @gnuless
/drivers/display/ @vanwinkeljan

@@ -7,6 +7,7 @@ zephyr_sources_ifdef(CONFIG_CLOCK_CONTROL_MCUX_PCC clock_control_mcux
zephyr_sources_ifdef(CONFIG_CLOCK_CONTROL_MCUX_SCG clock_control_mcux_scg.c)
zephyr_sources_ifdef(CONFIG_CLOCK_CONTROL_MCUX_SIM clock_control_mcux_sim.c)
zephyr_sources_ifdef(CONFIG_CLOCK_CONTROL_NRF nrf_power_clock.c)
zephyr_sources_ifdef(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION nrf_clock_calibration.c)
zephyr_sources_ifdef(CONFIG_CLOCK_CONTROL_RV32M1_PCC clock_control_rv32m1_pcc.c)
if(CONFIG_CLOCK_CONTROL_STM32_CUBE)

@@ -47,14 +47,57 @@ config CLOCK_CONTROL_NRF_K32SRC_EXT_FULL_SWING
endchoice
config CLOCK_CONTROL_NRF_K32SRC_BLOCKING
bool "Blocking 32KHz crystal oscillator startup"
depends on CLOCK_CONTROL_NRF_K32SRC_XTAL
help
The clock control driver will spin wait in CPU sleep until the 32KHz
crystal oscillator starts up. If this option is not enabled, the RC
oscillator will initially start running and automatically switch to
the crystal when it is ready.
config CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION
bool
depends on !SOC_SERIES_NRF91X
default y if CLOCK_CONTROL_NRF_K32SRC_RC
if CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION
config CLOCK_CONTROL_NRF_CALIBRATION_PERIOD
int "Calibration opportunity period (in 250ms units)"
default 16
range 1 127
help
A calibration action is performed periodically. The action consists of
a temperature measurement followed by clock calibration. Calibration
may be skipped if the temperature change (compared with the
measurement of the previous calibration) did not exceed
CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF and the number of consecutive
skips did not exceed CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP.
config CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP
int "Maximum number of calibration skips"
default 1
range 0 255
help
Calibration is skipped when the temperature change since the last
calibration is below the configured threshold. If the number of
consecutive skips reaches the configured value, calibration is
performed unconditionally. Set to 0 to perform calibration
periodically regardless of the temperature change.
config CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF
int "Temperature change triggering calibration (in 0.25 degree units)"
default 2
help
Calibration is triggered if the temperature has changed by at least
this amount since the last calibration.
config CLOCK_CONTROL_NRF_CALIBRATION_DEBUG
bool "Calibration instrumentation"
help
Enables retrieving debug information such as the number of performed
or skipped calibrations.
config CLOCK_CONTROL_NRF_USES_TEMP_SENSOR
bool
default y if CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP > 0 && \
CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF > 0
select TEMP_NRF5
select SENSOR
endif # CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION
choice CLOCK_CONTROL_NRF_ACCURACY
prompt "32KHz clock accuracy"

@@ -0,0 +1,331 @@
/*
* Copyright (c) 2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <sensor.h>
#include <drivers/clock_control.h>
#include "nrf_clock_calibration.h"
#include <hal/nrf_clock.h>
#include <logging/log.h>
#include <stdlib.h>
LOG_MODULE_DECLARE(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
/* On platforms that do not have the CTSTOPPED event, the CT timer can be
* started immediately after being stopped. Events are redefined to avoid
* ifdefs in the code; CTSTOPPED interrupt handling is then compiled out.
*/
#ifndef CLOCK_EVENTS_CTSTOPPED_EVENTS_CTSTOPPED_Msk
#define NRF_CLOCK_EVENT_CTSTOPPED 0
#endif
#ifndef CLOCK_INTENSET_CTSTOPPED_Msk
#define NRF_CLOCK_INT_CTSTOPPED_MASK 0
#endif
#define TEMP_SENSOR_NAME \
COND_CODE_1(CONFIG_TEMP_NRF5, (CONFIG_TEMP_NRF5_NAME), (NULL))
/* Calibration state enum */
enum nrf_cal_state {
CAL_OFF,
CAL_IDLE, /* Calibration timer active, waiting for expiration. */
CAL_HFCLK_REQ, /* HFCLK XTAL requested. */
CAL_TEMP_REQ, /* Temperature measurement requested. */
CAL_ACTIVE, /* Ongoing calibration. */
CAL_ACTIVE_OFF /* Ongoing calibration, off requested. */
};
static enum nrf_cal_state cal_state; /* Calibration state. */
static s16_t prev_temperature; /* Previous temperature measurement. */
static u8_t calib_skip_cnt; /* Counting down skipped calibrations. */
static int total_cnt; /* Total number of calibrations. */
static int total_skips_cnt; /* Total number of skipped calibrations. */
/* Callback called on hfclk started. */
static void cal_hf_on_callback(struct device *dev, void *user_data);
static struct clock_control_async_data cal_hf_on_data = {
.cb = cal_hf_on_callback
};
static struct device *hfclk_dev; /* Handle to the HFCLK device. */
static struct device *temp_sensor; /* Handle to the temperature sensor device. */
static void measure_temperature(struct k_work *work);
static K_WORK_DEFINE(temp_measure_work, measure_temperature);
static bool clock_event_check_and_clean(u32_t evt, u32_t intmask)
{
bool ret = nrf_clock_event_check(evt) &&
nrf_clock_int_enable_check(intmask);
if (ret) {
nrf_clock_event_clear(evt);
}
return ret;
}
bool z_nrf_clock_calibration_start(struct device *dev)
{
bool ret;
int key = irq_lock();
if (cal_state != CAL_ACTIVE_OFF) {
ret = true;
} else {
ret = false;
}
cal_state = CAL_IDLE;
irq_unlock(key);
calib_skip_cnt = 0;
return ret;
}
void z_nrf_clock_calibration_lfclk_started(struct device *dev)
{
/* Trigger unconditional calibration when lfclk is started. */
cal_state = CAL_HFCLK_REQ;
clock_control_async_on(hfclk_dev, 0, &cal_hf_on_data);
}
bool z_nrf_clock_calibration_stop(struct device *dev)
{
int key;
bool ret = true;
key = irq_lock();
nrf_clock_task_trigger(NRF_CLOCK_TASK_CTSTOP);
nrf_clock_event_clear(NRF_CLOCK_EVENT_CTTO);
/* If calibration is active then defer the stop until it completes.
* Currently (and most likely in the future) the LFCLK is never stopped,
* so this is not an issue.
*/
if (cal_state == CAL_ACTIVE) {
cal_state = CAL_ACTIVE_OFF;
ret = false;
} else {
cal_state = CAL_OFF;
}
irq_unlock(key);
LOG_DBG("Stop requested %s.", (cal_state == CAL_ACTIVE_OFF) ?
"during ongoing calibration" : "");
return ret;
}
void z_nrf_clock_calibration_init(struct device *dev)
{
/* Anomaly 36: After watchdog timeout reset, CPU lockup reset, soft
* reset, or pin reset EVENTS_DONE and EVENTS_CTTO are not reset.
*/
nrf_clock_event_clear(NRF_CLOCK_EVENT_DONE);
nrf_clock_event_clear(NRF_CLOCK_EVENT_CTTO);
nrf_clock_int_enable(NRF_CLOCK_INT_DONE_MASK |
NRF_CLOCK_INT_CTTO_MASK |
NRF_CLOCK_INT_CTSTOPPED_MASK);
nrf_clock_cal_timer_timeout_set(
CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_PERIOD);
if (CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP != 0) {
temp_sensor = device_get_binding(TEMP_SENSOR_NAME);
}
hfclk_dev = dev;
total_cnt = 0;
total_skips_cnt = 0;
}
/* Start calibration assuming that HFCLK XTAL is on. */
static void start_calibration(void)
{
cal_state = CAL_ACTIVE;
/* Workaround for Errata 192 */
if (IS_ENABLED(CONFIG_SOC_SERIES_NRF52X)) {
*(volatile uint32_t *)0x40000C34 = 0x00000002;
}
nrf_clock_task_trigger(NRF_CLOCK_TASK_CAL);
calib_skip_cnt = CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP;
}
/* Restart calibration timer, release HFCLK XTAL. */
static void to_idle(void)
{
cal_state = CAL_IDLE;
clock_control_off(hfclk_dev, 0);
nrf_clock_task_trigger(NRF_CLOCK_TASK_CTSTART);
}
/* Convert sensor value to 0.25'C units. */
static inline s16_t sensor_value_to_temp_unit(struct sensor_value *val)
{
return (s16_t)(4 * val->val1 + val->val2 / 250000);
}
/* Function reads from temperature sensor and converts to 0.25'C units. */
static s16_t get_temperature(void)
{
struct sensor_value sensor_val;
sensor_sample_fetch(temp_sensor);
sensor_channel_get(temp_sensor, SENSOR_CHAN_DIE_TEMP, &sensor_val);
return sensor_value_to_temp_unit(&sensor_val);
}
/* Determine whether calibration should be performed based on a temperature
* measurement. Called from the system work queue context: it reads the
* temperature from the TEMP sensor and compares it with the previous
* measurement.
*/
static void measure_temperature(struct k_work *work)
{
s16_t temperature;
s16_t diff;
bool started = false;
int key;
temperature = get_temperature();
diff = abs(temperature - prev_temperature);
key = irq_lock();
if (cal_state != CAL_OFF) {
if ((calib_skip_cnt == 0) ||
(diff >= CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF)) {
prev_temperature = temperature;
start_calibration();
started = true;
} else {
to_idle();
calib_skip_cnt--;
total_skips_cnt++;
}
}
irq_unlock(key);
LOG_DBG("Calibration %s. Temperature diff: %d (in 0.25'C units).",
started ? "started" : "skipped", diff);
}
/* Called when HFCLK XTAL is on. Schedules temperature measurement or triggers
* calibration.
*/
static void cal_hf_on_callback(struct device *dev, void *user_data)
{
int key = irq_lock();
if (cal_state == CAL_HFCLK_REQ) {
if ((CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_MAX_SKIP == 0) ||
(IS_ENABLED(CONFIG_MULTITHREADING) == false)) {
start_calibration();
} else {
cal_state = CAL_TEMP_REQ;
k_work_submit(&temp_measure_work);
}
} else {
clock_control_off(hfclk_dev, 0);
}
irq_unlock(key);
}
static void on_cal_done(void)
{
/* Workaround for Errata 192 */
if (IS_ENABLED(CONFIG_SOC_SERIES_NRF52X)) {
*(volatile uint32_t *)0x40000C34 = 0x00000000;
}
total_cnt++;
LOG_DBG("Calibration done.");
int key = irq_lock();
if (cal_state == CAL_ACTIVE_OFF) {
clock_control_off(hfclk_dev, 0);
nrf_clock_task_trigger(NRF_CLOCK_TASK_LFCLKSTOP);
cal_state = CAL_OFF;
} else {
to_idle();
}
irq_unlock(key);
}
void z_nrf_clock_calibration_force_start(void)
{
int key = irq_lock();
calib_skip_cnt = 0;
if (cal_state == CAL_IDLE) {
cal_state = CAL_HFCLK_REQ;
clock_control_async_on(hfclk_dev, 0, &cal_hf_on_data);
}
irq_unlock(key);
}
void z_nrf_clock_calibration_isr(void)
{
if (clock_event_check_and_clean(NRF_CLOCK_EVENT_CTTO,
NRF_CLOCK_INT_CTTO_MASK)) {
LOG_DBG("Calibration timeout.");
/* Start XTAL HFCLK. It is needed for temperature measurement
* and calibration.
*/
if (cal_state == CAL_IDLE) {
cal_state = CAL_HFCLK_REQ;
clock_control_async_on(hfclk_dev, 0, &cal_hf_on_data);
}
}
if (clock_event_check_and_clean(NRF_CLOCK_EVENT_DONE,
NRF_CLOCK_INT_DONE_MASK)) {
on_cal_done();
}
if (NRF_CLOCK_INT_CTSTOPPED_MASK &&
clock_event_check_and_clean(NRF_CLOCK_EVENT_CTSTOPPED,
NRF_CLOCK_INT_CTSTOPPED_MASK)) {
LOG_INF("CT stopped.");
if (cal_state == CAL_IDLE) {
/* If LF clock was restarted then CT might not be
* started because it was not yet stopped.
*/
LOG_INF("restarting");
nrf_clock_task_trigger(NRF_CLOCK_TASK_CTSTART);
}
}
}
int z_nrf_clock_calibration_count(void)
{
if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG)) {
return -1;
}
return total_cnt;
}
int z_nrf_clock_calibration_skips_count(void)
{
if (!IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG)) {
return -1;
}
return total_skips_cnt;
}
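
A side note on units: the 0.25 degree unit used by the module (and by
CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF) maps onto struct sensor_value as in
this small, hypothetical sketch mirroring sensor_value_to_temp_unit() above:

#include <zephyr/types.h>
#include <sensor.h>

/* Hypothetical reading of 25.5'C: 4 * 25 + 500000 / 250000 = 102 units. */
static s16_t example_temp_in_quarter_degrees(void)
{
	struct sensor_value val = { .val1 = 25, .val2 = 500000 };

	return (s16_t)(4 * val.val1 + val.val2 / 250000);
}

With CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_TEMP_DIFF=2, such a reading triggers
calibration only if it differs from the previous one by at least 2 units
(0.5 degrees).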

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2019 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_CLOCK_CONTROL_NRF_CLOCK_CALIBRATION_H_
#define ZEPHYR_DRIVERS_CLOCK_CONTROL_NRF_CLOCK_CALIBRATION_H_
#include <device.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Initialize LFCLK RC calibration.
*
* @param hfclk_dev HFCLK device.
*/
void z_nrf_clock_calibration_init(struct device *hfclk_dev);
/**
* @brief Calibration interrupt handler
*
* Must be called from clock interrupt context.
*/
void z_nrf_clock_calibration_isr(void);
/**
* @brief Start calibration.
*
* Function called when LFCLK RC clock is being started.
*
* @param dev LFCLK device.
*
* @retval true if clock can be started.
* @retval false if the clock was not stopped due to an ongoing calibration and
* does not need to be started again because it is still running.
*/
bool z_nrf_clock_calibration_start(struct device *dev);
/**
* @brief Notify calibration module about LF clock start
*
* @param dev LFCLK device.
*/
void z_nrf_clock_calibration_lfclk_started(struct device *dev);
/**
* @brief Stop calibration.
*
* Function called when LFCLK RC clock is being stopped.
*
* @param dev LFCLK device.
*
* @retval true if clock can be stopped.
* @retval false if the clock cannot be stopped due to an ongoing calibration.
* In that case the calibration module stops the clock once the
* calibration completes.
*/
bool z_nrf_clock_calibration_stop(struct device *dev);
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_DRIVERS_CLOCK_CONTROL_NRF_CLOCK_CALIBRATION_H_ */
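
For orientation, a simplified sketch of how the LFCLK driver is expected to
honour the stop contract above (this mirrors the stop_handler wiring in
nrf_power_clock.c below; the helper name is made up and refcounting is
omitted):

#include <device.h>
#include <hal/nrf_clock.h>
#include "nrf_clock_calibration.h"

/* Stop the 32KHz RC clock, respecting an ongoing calibration. When the
 * calibration module returns false it stops the clock itself once the
 * calibration completes.
 */
static void lfclk_stop_sketch(struct device *lfclk_dev)
{
	if (z_nrf_clock_calibration_stop(lfclk_dev)) {
		nrf_clock_task_trigger(NRF_CLOCK_TASK_LFCLKSTOP);
	}
}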

@@ -1,281 +1,322 @@
/*
* Copyright (c) 2016 Nordic Semiconductor ASA
* Copyright (c) 2016-2019 Nordic Semiconductor ASA
* Copyright (c) 2016 Vinayak Kariappa Chettimada
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <soc.h>
#include <errno.h>
#include <sys/atomic.h>
#include <device.h>
#include <drivers/clock_control.h>
#include <sys/__assert.h>
#include <hal/nrf_clock.h>
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
#include <hal/nrf_power.h>
#include <drivers/clock_control/nrf_clock_control.h>
#endif
#include "nrf_clock_calibration.h"
#include <logging/log.h>
#include <hal/nrf_power.h>
static u8_t m16src_ref;
static u8_t m16src_grd;
static u8_t k32src_initialized;
LOG_MODULE_REGISTER(clock_control, CONFIG_CLOCK_CONTROL_LOG_LEVEL);
static int m16src_start(struct device *dev, clock_control_subsys_t sub_system)
/* Helper logging macros which prepend the device name to the log message. */
#define CLOCK_LOG(lvl, dev, ...) \
LOG_##lvl("%s: " GET_ARG1(__VA_ARGS__), dev->config->name \
COND_CODE_0(NUM_VA_ARGS_LESS_1(__VA_ARGS__),\
(), (, GET_ARGS_LESS_1(__VA_ARGS__))))
#define ERR(dev, ...) CLOCK_LOG(ERR, dev, __VA_ARGS__)
#define WRN(dev, ...) CLOCK_LOG(WRN, dev, __VA_ARGS__)
#define INF(dev, ...) CLOCK_LOG(INF, dev, __VA_ARGS__)
#define DBG(dev, ...) CLOCK_LOG(DBG, dev, __VA_ARGS__)
/* returns true if clock stopping or starting can be performed. If false then
* start/stop will be deferred and performed later on by handler owner.
*/
typedef bool (*nrf_clock_handler_t)(struct device *dev);
/* Clock instance structure */
struct nrf_clock_control {
sys_slist_t list; /* List of users requesting callback */
s8_t ref; /* Users counter */
bool started; /* Indicates that the clock is started */
};
/* Clock instance static configuration */
struct nrf_clock_control_config {
nrf_clock_handler_t start_handler; /* Called before start */
nrf_clock_handler_t stop_handler; /* Called before stop */
nrf_clock_event_t started_evt; /* Clock started event */
nrf_clock_task_t start_tsk; /* Clock start task */
nrf_clock_task_t stop_tsk; /* Clock stop task */
};
/* Return true if given event has enabled interrupt and is triggered. Event
* is cleared.
*/
static bool clock_event_check_and_clean(nrf_clock_event_t evt, u32_t intmask)
{
bool blocking;
u32_t imask;
u32_t stat;
bool ret = nrf_clock_event_check(evt) &&
nrf_clock_int_enable_check(intmask);
/* If the clock is already started then just increment refcount.
* If the start and stop don't happen in pairs, a rollover will
* be caught and in that case system should assert.
*/
/* Test for reference increment from zero and resource guard not taken.
*/
imask = irq_lock();
if (m16src_ref++) {
irq_unlock(imask);
goto hf_already_started;
if (ret) {
nrf_clock_event_clear(evt);
}
if (m16src_grd) {
m16src_ref--;
irq_unlock(imask);
return -EAGAIN;
}
m16src_grd = 1U;
irq_unlock(imask);
/* If blocking then spin-wait in CPU sleep until 16MHz clock settles. */
blocking = POINTER_TO_UINT(sub_system);
if (blocking) {
u32_t intenset;
irq_disable(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
NRF_CLOCK->EVENTS_HFCLKSTARTED = 0;
intenset = NRF_CLOCK->INTENSET;
nrf_clock_int_enable(NRF_CLOCK_INT_HF_STARTED_MASK);
nrf_clock_task_trigger(NRF_CLOCK_TASK_HFCLKSTART);
while (NRF_CLOCK->EVENTS_HFCLKSTARTED == 0) {
__WFE();
__SEV();
__WFE();
}
NRF_CLOCK->EVENTS_HFCLKSTARTED = 0;
if (!(intenset & CLOCK_INTENSET_HFCLKSTARTED_Msk)) {
nrf_clock_int_disable(NRF_CLOCK_INT_HF_STARTED_MASK);
}
NVIC_ClearPendingIRQ(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
irq_enable(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
} else {
NRF_CLOCK->EVENTS_HFCLKSTARTED = 0;
nrf_clock_task_trigger(NRF_CLOCK_TASK_HFCLKSTART);
}
/* release resource guard */
m16src_grd = 0U;
hf_already_started:
/* rollover should not happen as start and stop shall be
* called in pairs.
*/
__ASSERT_NO_MSG(m16src_ref);
stat = NRF_CLOCK_HFCLK_HIGH_ACCURACY | CLOCK_HFCLKSTAT_STATE_Msk;
if ((NRF_CLOCK->HFCLKSTAT & stat) == stat) {
return 0;
} else {
return -EINPROGRESS;
}
return ret;
}
static int m16src_stop(struct device *dev, clock_control_subsys_t sub_system)
static enum clock_control_status get_status(struct device *dev,
clock_control_subsys_t sys)
{
u32_t imask;
struct nrf_clock_control *data = dev->driver_data;
ARG_UNUSED(sub_system);
/* Test for started resource, if so, decrement reference and acquire
* resource guard.
*/
imask = irq_lock();
if (!m16src_ref) {
irq_unlock(imask);
return -EALREADY;
if (data->started) {
return CLOCK_CONTROL_STATUS_ON;
}
if (--m16src_ref) {
irq_unlock(imask);
return -EBUSY;
if (data->ref > 0) {
return CLOCK_CONTROL_STATUS_STARTING;
}
if (m16src_grd) {
m16src_ref++;
irq_unlock(imask);
return -EAGAIN;
return CLOCK_CONTROL_STATUS_OFF;
}
static int clock_stop(struct device *dev, clock_control_subsys_t sub_system)
{
const struct nrf_clock_control_config *config =
dev->config->config_info;
struct nrf_clock_control *data = dev->driver_data;
int err = 0;
int key;
key = irq_lock();
data->ref--;
if (data->ref == 0) {
bool do_stop;
DBG(dev, "Stopping");
sys_slist_init(&data->list);
do_stop = (config->stop_handler) ?
config->stop_handler(dev) : true;
if (do_stop) {
nrf_clock_task_trigger(config->stop_tsk);
/* It may happen that clock is being stopped when it
* has just been started and start is not yet handled
* (due to irq_lock). In that case after stopping the
* clock, started event is cleared to prevent false
* interrupt being triggered.
*/
nrf_clock_event_clear(config->started_evt);
}
data->started = false;
} else if (data->ref < 0) {
data->ref = 0;
err = -EALREADY;
}
m16src_grd = 1U;
irq_unlock(key);
irq_unlock(imask);
return err;
}
/* re-entrancy and mult-context safe, and reference count is zero, */
static bool is_in_list(sys_slist_t *list, sys_snode_t *node)
{
sys_snode_t *item = sys_slist_peek_head(list);
nrf_clock_task_trigger(NRF_CLOCK_TASK_HFCLKSTOP);
do {
if (item == node) {
return true;
}
/* release resource guard */
m16src_grd = 0U;
item = sys_slist_peek_next(item);
} while (item);
return false;
}
static int clock_async_start(struct device *dev,
clock_control_subsys_t sub_system,
struct clock_control_async_data *data)
{
const struct nrf_clock_control_config *config =
dev->config->config_info;
struct nrf_clock_control *clk_data = dev->driver_data;
int key;
s8_t ref;
__ASSERT_NO_MSG((data == NULL) ||
((data != NULL) && (data->cb != NULL)));
key = irq_lock();
ref = ++clk_data->ref;
irq_unlock(key);
if (clk_data->started) {
if (data) {
data->cb(dev, data->user_data);
}
} else {
if (ref == 1) {
bool do_start;
do_start = (config->start_handler) ?
config->start_handler(dev) : true;
if (do_start) {
nrf_clock_task_trigger(config->start_tsk);
DBG(dev, "Triggered start task");
} else if (data) {
data->cb(dev, data->user_data);
}
}
/* if node is in the list it means that it is scheduled for
* the second time.
*/
if (data) {
if (is_in_list(&clk_data->list, &data->node)) {
return -EALREADY;
}
sys_slist_append(&clk_data->list, &data->node);
}
}
return 0;
}
static int k32src_start(struct device *dev, clock_control_subsys_t sub_system)
static int clock_start(struct device *dev, clock_control_subsys_t sub_system)
{
u32_t lf_clk_src;
u32_t imask;
u32_t stat;
return clock_async_start(dev, sub_system, NULL);
}
#if defined(CONFIG_CLOCK_CONTROL_NRF_K32SRC_BLOCKING)
u32_t intenset;
#endif /* CONFIG_CLOCK_CONTROL_NRF_K32SRC_BLOCKING */
static void nrf_power_clock_isr(void *arg);
/* If the LF clock is already started, but wasn't initialized with
* this function, allow it to run once. This is needed because if a
* soft reset is triggered while watchdog is active, the LF clock will
* already be running, but won't be configured yet (watchdog forces LF
* clock to be running).
*
* That is, a hardware check won't work here, because even if the LF
* clock is already running it might not be initialized. We need an
* initialized flag.
*/
imask = irq_lock();
if (k32src_initialized) {
irq_unlock(imask);
goto lf_already_started;
}
k32src_initialized = 1U;
irq_unlock(imask);
/* Clear events if any */
NRF_CLOCK->EVENTS_LFCLKSTARTED = 0;
/* Set LF Clock Source */
lf_clk_src = POINTER_TO_UINT(sub_system);
NRF_CLOCK->LFCLKSRC = lf_clk_src;
#if defined(CONFIG_CLOCK_CONTROL_NRF_K32SRC_BLOCKING)
irq_disable(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
intenset = NRF_CLOCK->INTENSET;
nrf_clock_int_enable(NRF_CLOCK_INT_LF_STARTED_MASK);
/* Start and spin-wait until clock settles */
nrf_clock_task_trigger(NRF_CLOCK_TASK_LFCLKSTART);
while (NRF_CLOCK->EVENTS_LFCLKSTARTED == 0) {
__WFE();
__SEV();
__WFE();
}
NRF_CLOCK->EVENTS_LFCLKSTARTED = 0;
if (!(intenset & CLOCK_INTENSET_LFCLKSTARTED_Msk)) {
nrf_clock_int_disable(NRF_CLOCK_INT_LF_STARTED_MASK);
}
NVIC_ClearPendingIRQ(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
static int hfclk_init(struct device *dev)
{
IRQ_CONNECT(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0,
DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0_PRIORITY,
nrf_power_clock_isr, 0, 0);
irq_enable(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
#else /* !CONFIG_CLOCK_CONTROL_NRF_K32SRC_BLOCKING */
/* NOTE: LFCLK will initially start running from the LFRC if LFXO is
* selected.
*/
nrf_clock_int_enable(NRF_CLOCK_INT_LF_STARTED_MASK);
nrf_clock_task_trigger(NRF_CLOCK_TASK_LFCLKSTART);
#endif /* !CONFIG_CLOCK_CONTROL_NRF_K32SRC_BLOCKING */
nrf_clock_lf_src_set(CLOCK_CONTROL_NRF_K32SRC);
#if NRF_CLOCK_HAS_CALIBRATION
/* If RC selected, calibrate and start timer for consecutive
* calibrations.
*/
nrf_clock_int_disable(NRF_CLOCK_INT_DONE_MASK |
NRF_CLOCK_INT_CTTO_MASK);
NRF_CLOCK->EVENTS_DONE = 0;
NRF_CLOCK->EVENTS_CTTO = 0;
if ((lf_clk_src & CLOCK_LFCLKSRC_SRC_Msk) == CLOCK_LFCLKSRC_SRC_RC) {
int err;
/* Set the Calibration Timer Initial Value */
NRF_CLOCK->CTIV = 16; /* 4s in 0.25s units */
/* Enable DONE and CTTO IRQs */
nrf_clock_int_enable(NRF_CLOCK_INT_DONE_MASK |
NRF_CLOCK_INT_CTTO_MASK);
/* If non-blocking LF clock start, then start HF clock in ISR */
if ((NRF_CLOCK->LFCLKSTAT & CLOCK_LFCLKSTAT_STATE_Msk) == 0) {
nrf_clock_int_enable(NRF_CLOCK_INT_LF_STARTED_MASK);
goto lf_already_started;
}
/* Start HF clock, if already started then explicitly
* assert IRQ.
* NOTE: The INTENSET is used as state flag to start
* calibration in ISR.
*/
nrf_clock_int_enable(NRF_CLOCK_INT_HF_STARTED_MASK);
err = m16src_start(dev, false);
if (!err) {
NVIC_SetPendingIRQ(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
} else {
__ASSERT_NO_MSG(err == -EINPROGRESS);
}
if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION)) {
z_nrf_clock_calibration_init(dev);
}
#endif /* NRF_CLOCK_HAS_CALIBRATION */
lf_already_started:
stat = (NRF_CLOCK->LFCLKSRCCOPY & CLOCK_LFCLKSRCCOPY_SRC_Msk) |
CLOCK_LFCLKSTAT_STATE_Msk;
if ((NRF_CLOCK->LFCLKSTAT & stat) == stat) {
return 0;
} else {
return -EINPROGRESS;
nrf_clock_int_enable((
NRF_CLOCK_INT_HF_STARTED_MASK |
NRF_CLOCK_INT_LF_STARTED_MASK |
COND_CODE_1(CONFIG_USB_NRF52840,
(NRF_POWER_INT_USBDETECTED_MASK |
NRF_POWER_INT_USBREMOVED_MASK |
NRF_POWER_INT_USBPWRRDY_MASK),
(0))));
sys_slist_init(&((struct nrf_clock_control *)dev->driver_data)->list);
return 0;
}
static int lfclk_init(struct device *dev)
{
sys_slist_init(&((struct nrf_clock_control *)dev->driver_data)->list);
return 0;
}
static const struct clock_control_driver_api clock_control_api = {
.on = clock_start,
.off = clock_stop,
.async_on = clock_async_start,
.get_status = get_status,
};
static struct nrf_clock_control hfclk;
static const struct nrf_clock_control_config hfclk_config = {
.start_tsk = NRF_CLOCK_TASK_HFCLKSTART,
.started_evt = NRF_CLOCK_EVENT_HFCLKSTARTED,
.stop_tsk = NRF_CLOCK_TASK_HFCLKSTOP
};
DEVICE_AND_API_INIT(clock_nrf5_m16src,
DT_INST_0_NORDIC_NRF_CLOCK_LABEL "_16M",
hfclk_init, &hfclk, &hfclk_config, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
&clock_control_api);
static struct nrf_clock_control lfclk;
static const struct nrf_clock_control_config lfclk_config = {
.start_tsk = NRF_CLOCK_TASK_LFCLKSTART,
.started_evt = NRF_CLOCK_EVENT_LFCLKSTARTED,
.stop_tsk = NRF_CLOCK_TASK_LFCLKSTOP,
.start_handler =
IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION) ?
z_nrf_clock_calibration_start : NULL,
.stop_handler =
IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION) ?
z_nrf_clock_calibration_stop : NULL
};
DEVICE_AND_API_INIT(clock_nrf5_k32src,
DT_INST_0_NORDIC_NRF_CLOCK_LABEL "_32K",
lfclk_init, &lfclk, &lfclk_config, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
&clock_control_api);
static void clkstarted_handle(struct device *dev)
{
struct clock_control_async_data *async_data;
struct nrf_clock_control *data = dev->driver_data;
sys_snode_t *node = sys_slist_get(&data->list);
DBG(dev, "Clock started");
data->started = true;
while (node != NULL) {
async_data = CONTAINER_OF(node,
struct clock_control_async_data, node);
async_data->cb(dev, async_data->user_data);
node = sys_slist_get(&data->list);
}
}
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
static inline void power_event_cb(nrf_power_event_t event)
#if defined(CONFIG_USB_NRF52840)
static bool power_event_check_and_clean(nrf_power_event_t evt, u32_t intmask)
{
extern void usb_dc_nrfx_power_event_callback(nrf_power_event_t event);
bool ret = nrf_power_event_check(evt) &&
nrf_power_int_enable_check(intmask);
usb_dc_nrfx_power_event_callback(event);
if (ret) {
nrf_power_event_clear(evt);
}
return ret;
}
#endif
static void usb_power_isr(void)
{
#if defined(CONFIG_USB_NRF52840)
extern void usb_dc_nrfx_power_event_callback(nrf_power_event_t event);
if (power_event_check_and_clean(NRF_POWER_EVENT_USBDETECTED,
NRF_POWER_INT_USBDETECTED_MASK)) {
usb_dc_nrfx_power_event_callback(NRF_POWER_EVENT_USBDETECTED);
}
if (power_event_check_and_clean(NRF_POWER_EVENT_USBPWRRDY,
NRF_POWER_INT_USBPWRRDY_MASK)) {
usb_dc_nrfx_power_event_callback(NRF_POWER_EVENT_USBPWRRDY);
}
if (power_event_check_and_clean(NRF_POWER_EVENT_USBREMOVED,
NRF_POWER_INT_USBREMOVED_MASK)) {
usb_dc_nrfx_power_event_callback(NRF_POWER_EVENT_USBREMOVED);
}
#endif
}
/* Note: this function has public linkage, and MUST have this
* particular name. The platform architecture itself doesn't care,
* but there is a test (tests/kernel/arm_irq_vector_table) that needs
@@ -286,203 +327,44 @@ static inline void power_event_cb(nrf_power_event_t event)
*/
void nrf_power_clock_isr(void *arg)
{
u8_t pof, hf_intenset, hf, lf_intenset, lf;
#if NRF_CLOCK_HAS_CALIBRATION
u8_t ctto, done;
struct device *dev = arg;
#endif
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
bool usb_detected, usb_pwr_rdy, usb_removed;
#endif
ARG_UNUSED(arg);
pof = (NRF_POWER->EVENTS_POFWARN != 0);
if (clock_event_check_and_clean(NRF_CLOCK_EVENT_HFCLKSTARTED,
NRF_CLOCK_INT_HF_STARTED_MASK)) {
struct device *hfclk_dev = DEVICE_GET(clock_nrf5_m16src);
struct nrf_clock_control *data = hfclk_dev->driver_data;
hf_intenset = ((NRF_CLOCK->INTENSET &
CLOCK_INTENSET_HFCLKSTARTED_Msk) != 0);
hf = (NRF_CLOCK->EVENTS_HFCLKSTARTED != 0);
lf_intenset = ((NRF_CLOCK->INTENSET &
CLOCK_INTENSET_LFCLKSTARTED_Msk) != 0);
lf = (NRF_CLOCK->EVENTS_LFCLKSTARTED != 0);
#if NRF_CLOCK_HAS_CALIBRATION
done = (NRF_CLOCK->EVENTS_DONE != 0);
ctto = (NRF_CLOCK->EVENTS_CTTO != 0);
#endif
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
usb_detected = nrf_power_event_check(NRF_POWER_EVENT_USBDETECTED);
usb_pwr_rdy = nrf_power_event_check(NRF_POWER_EVENT_USBPWRRDY);
usb_removed = nrf_power_event_check(NRF_POWER_EVENT_USBREMOVED);
#endif
__ASSERT_NO_MSG(pof || hf || hf_intenset || lf
#if NRF_CLOCK_HAS_CALIBRATION
|| done || ctto
#endif
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
|| usb_detected || usb_pwr_rdy || usb_removed
#endif
);
if (pof) {
NRF_POWER->EVENTS_POFWARN = 0;
}
if (hf) {
NRF_CLOCK->EVENTS_HFCLKSTARTED = 0;
}
if (hf_intenset && (hf || ((NRF_CLOCK->HFCLKSTAT &
(CLOCK_HFCLKSTAT_STATE_Msk |
CLOCK_HFCLKSTAT_SRC_Msk)) ==
(CLOCK_HFCLKSTAT_STATE_Msk |
CLOCK_HFCLKSTAT_SRC_Msk)))){
/* INTENSET is used as state flag to start calibration,
* hence clear it here.
/* Check needed due to anomaly 201:
* HFCLKSTARTED may be generated twice.
*/
NRF_CLOCK->INTENCLR = CLOCK_INTENCLR_HFCLKSTARTED_Msk;
#if defined(CONFIG_SOC_SERIES_NRF52X)
/* NOTE: Errata [192] CLOCK: LFRC frequency offset after
* calibration.
* Calibration start, workaround.
*/
*(volatile u32_t *)0x40000C34 = 0x00000002;
#endif /* CONFIG_SOC_SERIES_NRF52X */
#if NRF_CLOCK_HAS_CALIBRATION
/* Start Calibration */
NRF_CLOCK->TASKS_CAL = 1;
#endif
}
if (lf) {
NRF_CLOCK->EVENTS_LFCLKSTARTED = 0;
if (lf_intenset) {
/* INTENSET is used as state flag to start calibration,
* hence clear it here.
*/
NRF_CLOCK->INTENCLR = CLOCK_INTENCLR_LFCLKSTARTED_Msk;
#if NRF_CLOCK_HAS_CALIBRATION
/* Start HF Clock if LF RC is used. */
if ((NRF_CLOCK->LFCLKSRCCOPY & CLOCK_LFCLKSRCCOPY_SRC_Msk) ==
CLOCK_LFCLKSRCCOPY_SRC_RC) {
ctto = 1U;
}
#endif
if (!data->started) {
clkstarted_handle(hfclk_dev);
}
}
#if NRF_CLOCK_HAS_CALIBRATION
if (done) {
int err;
if (clock_event_check_and_clean(NRF_CLOCK_EVENT_LFCLKSTARTED,
NRF_CLOCK_INT_LF_STARTED_MASK)) {
struct device *lfclk_dev = DEVICE_GET(clock_nrf5_k32src);
#if defined(CONFIG_SOC_SERIES_NRF52X)
/* NOTE: Errata [192] CLOCK: LFRC frequency offset after
* calibration.
* Calibration done, workaround.
*/
*(volatile u32_t *)0x40000C34 = 0x00000000;
#endif /* CONFIG_SOC_SERIES_NRF52X */
NRF_CLOCK->EVENTS_DONE = 0;
/* Calibration done, stop 16M Xtal. */
err = m16src_stop(dev, NULL);
__ASSERT_NO_MSG(!err || err == -EBUSY);
/* Start timer for next calibration. */
NRF_CLOCK->TASKS_CTSTART = 1;
}
if (ctto) {
int err;
NRF_CLOCK->EVENTS_CTTO = 0;
/* Start HF clock, if already started
* then explicitly assert IRQ; we use the INTENSET
* as a state flag to start calibration.
*/
NRF_CLOCK->INTENSET = CLOCK_INTENSET_HFCLKSTARTED_Msk;
err = m16src_start(dev, false);
if (!err) {
NVIC_SetPendingIRQ(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
} else {
__ASSERT_NO_MSG(err == -EINPROGRESS);
if (IS_ENABLED(
CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION)) {
z_nrf_clock_calibration_lfclk_started(lfclk_dev);
}
}
#endif /* NRF_CLOCK_HAS_CALIBRATION */
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
if (usb_detected) {
nrf_power_event_clear(NRF_POWER_EVENT_USBDETECTED);
power_event_cb(NRF_POWER_EVENT_USBDETECTED);
clkstarted_handle(lfclk_dev);
}
if (usb_pwr_rdy) {
nrf_power_event_clear(NRF_POWER_EVENT_USBPWRRDY);
power_event_cb(NRF_POWER_EVENT_USBPWRRDY);
}
usb_power_isr();
if (usb_removed) {
nrf_power_event_clear(NRF_POWER_EVENT_USBREMOVED);
power_event_cb(NRF_POWER_EVENT_USBREMOVED);
if (IS_ENABLED(CONFIG_CLOCK_CONTROL_NRF_K32SRC_RC_CALIBRATION)) {
z_nrf_clock_calibration_isr();
}
#endif
}
static int clock_control_init(struct device *dev)
{
/* TODO: Initialization will be called twice, once for 32KHz and then
* for 16 MHz clock. The vector is also shared for other power related
* features. Hence, design a better way to init IRQISR when adding
* power peripheral driver and/or new SoC series.
* NOTE: Currently the operations here are idempotent.
*/
IRQ_CONNECT(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0,
DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0_PRIORITY,
nrf_power_clock_isr, 0, 0);
irq_enable(DT_INST_0_NORDIC_NRF_CLOCK_IRQ_0);
return 0;
}
static const struct clock_control_driver_api _m16src_clock_control_api = {
.on = m16src_start,
.off = m16src_stop,
.get_rate = NULL,
};
DEVICE_AND_API_INIT(clock_nrf5_m16src,
DT_INST_0_NORDIC_NRF_CLOCK_LABEL "_16M",
clock_control_init, NULL, NULL, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
&_m16src_clock_control_api);
static const struct clock_control_driver_api _k32src_clock_control_api = {
.on = k32src_start,
.off = NULL,
.get_rate = NULL,
};
DEVICE_AND_API_INIT(clock_nrf5_k32src,
DT_INST_0_NORDIC_NRF_CLOCK_LABEL "_32K",
clock_control_init, NULL, NULL, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
&_k32src_clock_control_api);
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
void nrf5_power_usb_power_int_enable(bool enable)
{
#ifdef CONFIG_USB_NRF52840
u32_t mask;
mask = NRF_POWER_INT_USBDETECTED_MASK |
NRF_POWER_INT_USBREMOVED_MASK |
NRF_POWER_INT_USBPWRRDY_MASK;
@@ -493,6 +375,5 @@ void nrf5_power_usb_power_int_enable(bool enable)
} else {
nrf_power_int_disable(mask);
}
}
#endif
}

@@ -25,10 +25,18 @@ struct temp_nrf5_data {
struct device *hfclk_dev;
};
static void hfclk_on_callback(struct device *dev, void *user_data)
{
nrf_temp_task_trigger(NRF_TEMP, NRF_TEMP_TASK_START);
}
static int temp_nrf5_sample_fetch(struct device *dev, enum sensor_channel chan)
{
struct temp_nrf5_data *data = dev->driver_data;
struct clock_control_async_data clk_data = {
.cb = hfclk_on_callback
};
int r;
@@ -36,18 +44,13 @@ static int temp_nrf5_sample_fetch(struct device *dev, enum sensor_channel chan)
return -ENOTSUP;
}
/* The clock driver for nrf51 currently overloads the
* subsystem parameter with a flag to indicate whether or not
* it should block.
*/
r = clock_control_on(data->hfclk_dev, (void *)1);
r = clock_control_async_on(data->hfclk_dev, NULL, &clk_data);
__ASSERT_NO_MSG(!r);
nrf_temp_task_trigger(NRF_TEMP, NRF_TEMP_TASK_START);
k_sem_take(&data->device_sync_sem, K_FOREVER);
r = clock_control_off(data->hfclk_dev, (void *)1);
__ASSERT_NO_MSG(!r || r == -EBUSY);
r = clock_control_off(data->hfclk_dev, 0);
__ASSERT_NO_MSG(!r);
data->sample = nrf_temp_result_get(NRF_TEMP);
LOG_DBG("sample: %d", data->sample);

@@ -7,9 +7,7 @@
#ifndef ZEPHYR_INCLUDE_DRIVERS_CLOCK_CONTROL_NRF_CLOCK_CONTROL_H_
#define ZEPHYR_INCLUDE_DRIVERS_CLOCK_CONTROL_NRF_CLOCK_CONTROL_H_
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
#include <device.h>
#endif
#include <hal/nrf_clock.h>
/* TODO: move all these to clock_control.h ? */
@@ -57,8 +55,25 @@
#define CLOCK_CONTROL_NRF_K32SRC_ACCURACY 7
#endif
#if defined(CONFIG_USB) && defined(CONFIG_SOC_NRF52840)
void nrf5_power_usb_power_int_enable(bool enable);
#endif
/** @brief Force LF clock calibration. */
void z_nrf_clock_calibration_force_start(void);
/** @brief Return number of calibrations performed.
*
* Valid when @ref CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG is set.
*
* @return Number of calibrations or -1 if feature is disabled.
*/
int z_nrf_clock_calibration_count(void);
/** @brief Return the number of times calibration was skipped.
*
* Valid when @ref CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG is set.
*
* @return Number of skipped calibrations or -1 if the feature is disabled.
*/
int z_nrf_clock_calibration_skips_count(void);
#endif /* ZEPHYR_INCLUDE_DRIVERS_CLOCK_CONTROL_NRF_CLOCK_CONTROL_H_ */
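
Finally, a hedged usage sketch for the calibration debug hooks declared above
(the module name and log message are illustrative; the counters return -1
unless CONFIG_CLOCK_CONTROL_NRF_CALIBRATION_DEBUG is enabled):

#include <drivers/clock_control/nrf_clock_control.h>
#include <logging/log.h>

LOG_MODULE_REGISTER(cal_stats);

void report_calibration_stats(void)
{
	/* Force a calibration cycle regardless of temperature change. */
	z_nrf_clock_calibration_force_start();

	LOG_INF("calibrations: %d, skipped: %d",
		z_nrf_clock_calibration_count(),
		z_nrf_clock_calibration_skips_count());
}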