2019-11-08 10:17:08 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018 Foundries.io Ltd
|
|
|
|
* Copyright (c) 2019 STMicroelectronics
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/device.h>
|
2019-11-08 10:17:08 +01:00
|
|
|
#include <soc.h>
|
2020-11-20 18:41:15 +01:00
|
|
|
#include <stm32_ll_lptim.h>
|
2020-12-01 16:14:28 +01:00
|
|
|
#include <stm32_ll_bus.h>
|
|
|
|
#include <stm32_ll_rcc.h>
|
|
|
|
#include <stm32_ll_pwr.h>
|
2021-01-05 21:21:16 +01:00
|
|
|
#include <stm32_ll_system.h>
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/drivers/clock_control.h>
|
|
|
|
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
|
|
|
|
#include <zephyr/drivers/timer/system_timer.h>
|
|
|
|
#include <zephyr/sys_clock.h>
|
2019-11-08 10:17:08 +01:00
|
|
|
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/spinlock.h>
|
2019-11-08 10:17:08 +01:00
|
|
|
|
2022-08-05 11:34:49 +02:00
|
|
|
/* Devicetree compatible handled by this driver */
#define DT_DRV_COMPAT st_stm32_lptim

/* This driver implements the single kernel system timer:
 * exactly one enabled LPTIM instance is supported.
 */
#if DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) > 1
#error Only one LPTIM instance should be enabled
#endif

/* Register base of the (single) enabled LPTIM instance, from devicetree */
#define LPTIM (LPTIM_TypeDef *) DT_INST_REG_ADDR(0)
|
|
|
|
|
2019-11-08 10:17:08 +01:00
|
|
|
/*
 * Assumptions and limitations:
 *
 * - system clock based on an LPTIM instance, clocked by LSI or LSE
 * - prescaler is set to 1 (LL_LPTIM_PRESCALER_DIV1 in the related register)
 * - using the LPTIM AutoReload capability to trigger the IRQ (timeout irq)
 * - when the timeout irq occurs, the counter has already been reset
 * - the maximum timeout duration is reached with the LPTIM_TIMEBASE value
 * - with a prescaler of 1, the max timeout (LPTIM_TIMEBASE) is 2 seconds
 */
|
|
|
|
|
|
|
|
/* LPTIM input clock frequency in Hz (LSI or LSE), from Kconfig */
#define LPTIM_CLOCK CONFIG_STM32_LPTIM_CLOCK
/* Maximum autoreload value (16-bit counter span), from Kconfig */
#define LPTIM_TIMEBASE CONFIG_STM32_LPTIM_TIMEBASE

/* nb of LPTIM counter unit per kernel tick */
#define COUNT_PER_TICK (LPTIM_CLOCK / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

/* minimum nb of clock cycles to have to set autoreload register correctly */
#define LPTIM_GUARD_VALUE 2
|
|
|
|
|
2019-11-08 10:17:08 +01:00
|
|
|
/* Running total of LPTIM counts, incremented on each autoreload (ARRM)
 * interrupt and read back by sys_clock_cycle_get_32().
 * A 32bit value cannot exceed 0xFFFFFFFF/LPTIM_TIMEBASE counting cycles.
 * This is for example about 65000 x 2000ms when clocked by LSI.
 */
static uint32_t accumulated_lptim_cnt;
/* Next autoreload value to set (deferred when a write is in flight) */
static uint32_t autoreload_next;
/* Indicate if the autoreload register is ready for a write
 * (true once the previous write has completed, i.e. ARROK seen)
 */
static bool autoreload_ready = true;

/* Protects accumulated_lptim_cnt and the counter/ARR read-modify sequences */
static struct k_spinlock lock;
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* LPTIM interrupt handler.
 *
 * Handles two events:
 *  - ARROK: a previous write to the autoreload register has completed;
 *    if a newer value was requested meanwhile (autoreload_next), write it
 *    now, otherwise mark the register ready for direct writes.
 *  - ARRM: the counter matched the autoreload value (timeout); accumulate
 *    the elapsed LPTIM counts and announce the elapsed ticks to the kernel.
 */
static void lptim_irq_handler(const struct device *unused)
{
	ARG_UNUSED(unused);

	uint32_t autoreload = LL_LPTIM_GetAutoReload(LPTIM);

	if ((LL_LPTIM_IsActiveFlag_ARROK(LPTIM) != 0)
	    && LL_LPTIM_IsEnabledIT_ARROK(LPTIM) != 0) {
		LL_LPTIM_ClearFlag_ARROK(LPTIM);
		if ((autoreload_next > 0) && (autoreload_next != autoreload)) {
			/* a new autoreload value was requested while the
			 * previous write was in flight: set it now
			 */
			autoreload_ready = false;
			LL_LPTIM_SetAutoReload(LPTIM, autoreload_next);
		} else {
			autoreload_ready = true;
		}
	}

	if ((LL_LPTIM_IsActiveFlag_ARRM(LPTIM) != 0)
	    && LL_LPTIM_IsEnabledIT_ARRM(LPTIM) != 0) {

		k_spinlock_key_t key = k_spin_lock(&lock);

		/* do not change ARR yet, sys_clock_announce will do */
		LL_LPTIM_ClearFLAG_ARRM(LPTIM);

		/* increase the total nb of autoreload count
		 * used in the sys_clock_cycle_get_32() function.
		 * ARR counts 0..autoreload inclusive, hence the +1.
		 */
		autoreload++;

		accumulated_lptim_cnt += autoreload;

		k_spin_unlock(&lock, key);

		/* announce the elapsed time in ticks (count register is 16bit) */
		uint32_t dticks = (autoreload
				* CONFIG_SYS_CLOCK_TICKS_PER_SEC)
				/ LPTIM_CLOCK;

		sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL)
				? dticks : (dticks > 0));
	}
}
|
|
|
|
|
2022-06-23 11:37:48 +02:00
|
|
|
/* Request a new autoreload (ARR) value.
 *
 * The value is always recorded in autoreload_next; if a previous ARR write
 * is still in flight (autoreload_ready == false), the actual register write
 * is deferred to the ARROK branch of lptim_irq_handler(). Otherwise the
 * register is written immediately (only when the value is non-zero and
 * actually changes).
 *
 * @param arr autoreload value to program (LPTIM counts)
 */
static void lptim_set_autoreload(uint32_t arr)
{
	/* Update autoreload register */
	autoreload_next = arr;

	if (!autoreload_ready) {
		/* write in flight: the irq handler will apply autoreload_next */
		return;
	}

	/* The ARR register is ready, we can set it directly */
	if ((arr > 0) && (arr != LL_LPTIM_GetAutoReload(LPTIM))) {
		/* The new autoreload value changed, set it */
		autoreload_ready = false;
		LL_LPTIM_ClearFlag_ARROK(LPTIM);
		LL_LPTIM_SetAutoReload(LPTIM, arr);
	}
}
|
|
|
|
|
2020-06-18 16:34:28 +02:00
|
|
|
static inline uint32_t z_clock_lptim_getcounter(void)
|
|
|
|
{
|
|
|
|
uint32_t lp_time;
|
|
|
|
uint32_t lp_time_prev_read;
|
|
|
|
|
|
|
|
/* It should be noted that to read reliably the content
|
|
|
|
* of the LPTIM_CNT register, two successive read accesses
|
|
|
|
* must be performed and compared
|
|
|
|
*/
|
2022-08-05 11:34:49 +02:00
|
|
|
lp_time = LL_LPTIM_GetCounter(LPTIM);
|
2020-06-18 16:34:28 +02:00
|
|
|
do {
|
|
|
|
lp_time_prev_read = lp_time;
|
2022-08-05 11:34:49 +02:00
|
|
|
lp_time = LL_LPTIM_GetCounter(LPTIM);
|
2020-06-18 16:34:28 +02:00
|
|
|
} while (lp_time != lp_time_prev_read);
|
|
|
|
return lp_time;
|
|
|
|
}
|
|
|
|
|
2021-02-25 21:33:15 +01:00
|
|
|
/* Program the next kernel timeout (tickless mode).
 *
 * Computes a new autoreload value aligned on kernel tick boundaries,
 * clamped to [current counter + LPTIM_GUARD_VALUE, LPTIM_TIMEBASE].
 * K_TICKS_FOREVER gates the LPTIM bus clock off entirely; any other
 * timeout restores the clock if it was previously stopped.
 *
 * @param ticks ticks until the next expected announcement
 * @param idle  unused
 */
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* new LPTIM AutoReload value to set (aligned on Kernel ticks) */
	uint32_t next_arr = 0;

	ARG_UNUSED(idle);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	if (ticks == K_TICKS_FOREVER) {
		/* disable LPTIM clock to avoid counting */
#if defined(LL_APB1_GRP1_PERIPH_LPTIM1)
		LL_APB1_GRP1_DisableClock(LL_APB1_GRP1_PERIPH_LPTIM1);
#elif defined(LL_APB3_GRP1_PERIPH_LPTIM1)
		LL_APB3_GRP1_DisableClock(LL_APB3_GRP1_PERIPH_LPTIM1);
#endif
		return;
	}

	/* if LPTIM clock was previously stopped, it must now be restored */
#if defined(LL_APB1_GRP1_PERIPH_LPTIM1)
	if (!LL_APB1_GRP1_IsEnabledClock(LL_APB1_GRP1_PERIPH_LPTIM1)) {
		LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_LPTIM1);
	}
#elif defined(LL_APB3_GRP1_PERIPH_LPTIM1)
	if (!LL_APB3_GRP1_IsEnabledClock(LL_APB3_GRP1_PERIPH_LPTIM1)) {
		LL_APB3_GRP1_EnableClock(LL_APB3_GRP1_PERIPH_LPTIM1);
	}
#endif

	/* passing ticks==1 means "announce the next tick",
	 * ticks value of zero (or even negative) is legal and
	 * treated identically: it simply indicates the kernel would like the
	 * next tick announcement as soon as possible.
	 */
	ticks = CLAMP(ticks - 1, 1, (int32_t)LPTIM_TIMEBASE);

	k_spinlock_key_t key = k_spin_lock(&lock);

	/* read current counter value (cannot exceed 16bit) */

	uint32_t lp_time = z_clock_lptim_getcounter();

	uint32_t autoreload = LL_LPTIM_GetAutoReload(LPTIM);

	if (LL_LPTIM_IsActiveFlag_ARRM(LPTIM)
	    || ((autoreload - lp_time) < LPTIM_GUARD_VALUE)) {
		/* interrupt happens or happens soon.
		 * It's impossible to set autoreload value.
		 */
		k_spin_unlock(&lock, key);
		return;
	}

	/* calculate the next arr value (cannot exceed 16bit)
	 * adjust the next ARR match value to align on Ticks
	 * from the current counter value to first next Tick
	 */
	next_arr = (((lp_time * CONFIG_SYS_CLOCK_TICKS_PER_SEC)
			/ LPTIM_CLOCK) + 1) * LPTIM_CLOCK
			/ (CONFIG_SYS_CLOCK_TICKS_PER_SEC);
	/* add count unit from the expected nb of Ticks */
	next_arr = next_arr + ((uint32_t)(ticks) * LPTIM_CLOCK)
			/ CONFIG_SYS_CLOCK_TICKS_PER_SEC - 1;

	/* maximise to TIMEBASE */
	if (next_arr > LPTIM_TIMEBASE) {
		next_arr = LPTIM_TIMEBASE;
	}
	/* The new autoreload value must be LPTIM_GUARD_VALUE clock cycles
	 * after current lptim to make sure we don't miss
	 * an autoreload interrupt
	 */
	else if (next_arr < (lp_time + LPTIM_GUARD_VALUE)) {
		next_arr = lp_time + LPTIM_GUARD_VALUE;
	}

	/* Update autoreload register */
	lptim_set_autoreload(next_arr);

	k_spin_unlock(&lock, key);
}
|
|
|
|
|
2021-02-25 21:33:15 +01:00
|
|
|
uint32_t sys_clock_elapsed(void)
|
2019-11-08 10:17:08 +01:00
|
|
|
{
|
|
|
|
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
k_spinlock_key_t key = k_spin_lock(&lock);
|
|
|
|
|
2020-06-18 16:34:28 +02:00
|
|
|
uint32_t lp_time = z_clock_lptim_getcounter();
|
2019-11-08 10:17:08 +01:00
|
|
|
|
2020-05-11 15:53:19 +02:00
|
|
|
/* In case of counter roll-over, add this value,
|
|
|
|
* even if the irq has not yet been handled
|
|
|
|
*/
|
2022-08-05 11:34:49 +02:00
|
|
|
if ((LL_LPTIM_IsActiveFlag_ARRM(LPTIM) != 0)
|
|
|
|
&& LL_LPTIM_IsEnabledIT_ARRM(LPTIM) != 0) {
|
|
|
|
lp_time += LL_LPTIM_GetAutoReload(LPTIM) + 1;
|
2020-05-11 15:53:19 +02:00
|
|
|
}
|
|
|
|
|
2019-11-08 10:17:08 +01:00
|
|
|
k_spin_unlock(&lock, key);
|
|
|
|
|
2022-08-05 11:34:49 +02:00
|
|
|
/* gives the value of LPTIM counter (ms)
|
2019-11-08 10:17:08 +01:00
|
|
|
* since the previous 'announce'
|
|
|
|
*/
|
2021-01-07 09:46:31 +01:00
|
|
|
uint64_t ret = ((uint64_t)lp_time * CONFIG_SYS_CLOCK_TICKS_PER_SEC) / LPTIM_CLOCK;
|
2019-11-08 10:17:08 +01:00
|
|
|
|
2020-05-27 18:26:57 +02:00
|
|
|
return (uint32_t)(ret);
|
2019-11-08 10:17:08 +01:00
|
|
|
}
|
|
|
|
|
2021-03-12 18:46:52 +01:00
|
|
|
uint32_t sys_clock_cycle_get_32(void)
|
2019-11-08 10:17:08 +01:00
|
|
|
{
|
|
|
|
/* just gives the accumulated count in a number of hw cycles */
|
|
|
|
|
|
|
|
k_spinlock_key_t key = k_spin_lock(&lock);
|
|
|
|
|
2020-06-18 16:34:28 +02:00
|
|
|
uint32_t lp_time = z_clock_lptim_getcounter();
|
2020-05-11 15:53:19 +02:00
|
|
|
|
|
|
|
/* In case of counter roll-over, add this value,
|
|
|
|
* even if the irq has not yet been handled
|
|
|
|
*/
|
2022-08-05 11:34:49 +02:00
|
|
|
if ((LL_LPTIM_IsActiveFlag_ARRM(LPTIM) != 0)
|
|
|
|
&& LL_LPTIM_IsEnabledIT_ARRM(LPTIM) != 0) {
|
|
|
|
lp_time += LL_LPTIM_GetAutoReload(LPTIM) + 1;
|
2020-05-11 15:53:19 +02:00
|
|
|
}
|
|
|
|
|
2019-11-08 10:17:08 +01:00
|
|
|
lp_time += accumulated_lptim_cnt;
|
|
|
|
|
2020-05-11 15:53:19 +02:00
|
|
|
/* convert lptim count in a nb of hw cycles with precision */
|
2021-01-07 09:46:31 +01:00
|
|
|
uint64_t ret = ((uint64_t)lp_time * sys_clock_hw_cycles_per_sec()) / LPTIM_CLOCK;
|
2020-05-11 15:53:19 +02:00
|
|
|
|
2019-11-08 10:17:08 +01:00
|
|
|
k_spin_unlock(&lock, key);
|
|
|
|
|
|
|
|
/* convert in hw cycles (keeping 32bit value) */
|
2020-05-27 18:26:57 +02:00
|
|
|
return (uint32_t)(ret);
|
2019-11-08 10:17:08 +01:00
|
|
|
}
|
2021-11-04 12:51:39 +01:00
|
|
|
|
|
|
|
/* Initialize the LPTIM as the kernel system timer.
 *
 * Sequence: enable the LPTIM bus clock, bring up its kernel clock source
 * (LSI or LSE per Kconfig), connect and enable the IRQ, configure the
 * counter (internal clock, prescaler 1, continuous mode), enable the ARRM
 * and ARROK interrupts, program the initial autoreload, then start counting.
 * The STM32U5 series requires the LPTIM to be enabled before interrupt
 * configuration and uses DIEROK handshakes on DIER writes.
 *
 * @param dev unused
 * @return 0 on success
 */
static int sys_clock_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* enable LPTIM clock source */
#if defined(LL_APB1_GRP1_PERIPH_LPTIM1)
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_LPTIM1);
	LL_APB1_GRP1_ReleaseReset(LL_APB1_GRP1_PERIPH_LPTIM1);
#elif defined(LL_APB3_GRP1_PERIPH_LPTIM1)
	LL_APB3_GRP1_EnableClock(LL_APB3_GRP1_PERIPH_LPTIM1);
	LL_SRDAMR_GRP1_EnableAutonomousClock(LL_SRDAMR_GRP1_PERIPH_LPTIM1AMEN);
#endif

#if defined(CONFIG_STM32_LPTIM_CLOCK_LSI)
	/* enable LSI clock (STM32WB has two LSI oscillators, use LSI1) */
#ifdef CONFIG_SOC_SERIES_STM32WBX
	LL_RCC_LSI1_Enable();
	while (!LL_RCC_LSI1_IsReady()) {
#else
	LL_RCC_LSI_Enable();
	while (!LL_RCC_LSI_IsReady()) {
#endif /* CONFIG_SOC_SERIES_STM32WBX */
		/* Wait for LSI ready */
	}

	LL_RCC_SetLPTIMClockSource(LL_RCC_LPTIM1_CLKSOURCE_LSI);

#else /* CONFIG_STM32_LPTIM_CLOCK_LSI */
#if defined(LL_APB1_GRP1_PERIPH_PWR)
	/* Enable the power interface clock */
	LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_PWR);
#endif /* LL_APB1_GRP1_PERIPH_PWR */

	/* enable backup domain access (LSE lives in the backup domain) */
	LL_PWR_EnableBkUpAccess();

	/* enable LSE clock */
	LL_RCC_LSE_DisableBypass();
#ifdef RCC_BDCR_LSEDRV_Pos
	LL_RCC_LSE_SetDriveCapability(STM32_LSE_DRIVING << RCC_BDCR_LSEDRV_Pos);
#endif
	LL_RCC_LSE_Enable();
	while (!LL_RCC_LSE_IsReady()) {
		/* Wait for LSE ready */
	}
#ifdef RCC_BDCR_LSESYSEN
	LL_RCC_LSE_EnablePropagation();
#endif /* RCC_BDCR_LSESYSEN */
	LL_RCC_SetLPTIMClockSource(LL_RCC_LPTIM1_CLKSOURCE_LSE);

#endif /* CONFIG_STM32_LPTIM_CLOCK_LSI */

	/* Connect and enable the LPTIM interrupt */
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority),
		    lptim_irq_handler, 0, 0);
	irq_enable(DT_INST_IRQN(0));

#ifdef CONFIG_SOC_SERIES_STM32WLX
	/* Enable the LPTIM wakeup EXTI line */
	LL_EXTI_EnableIT_0_31(LL_EXTI_LINE_29);
#endif

	/* configure the LPTIM counter */
	LL_LPTIM_SetClockSource(LPTIM, LL_LPTIM_CLK_SOURCE_INTERNAL);
	/* configure the LPTIM prescaler with 1 */
	LL_LPTIM_SetPrescaler(LPTIM, LL_LPTIM_PRESCALER_DIV1);
#ifdef CONFIG_SOC_SERIES_STM32U5X
	LL_LPTIM_OC_SetPolarity(LPTIM, LL_LPTIM_CHANNEL_CH1,
				LL_LPTIM_OUTPUT_POLARITY_REGULAR);
#else
	LL_LPTIM_SetPolarity(LPTIM, LL_LPTIM_OUTPUT_POLARITY_REGULAR);
#endif
	LL_LPTIM_SetUpdateMode(LPTIM, LL_LPTIM_UPDATE_MODE_IMMEDIATE);
	LL_LPTIM_SetCounterMode(LPTIM, LL_LPTIM_COUNTER_MODE_INTERNAL);
	LL_LPTIM_DisableTimeout(LPTIM);
	/* counting start is initiated by software */
	LL_LPTIM_TrigSw(LPTIM);

#ifdef CONFIG_SOC_SERIES_STM32U5X
	/* Enable the LPTIM before proceeding with configuration */
	LL_LPTIM_Enable(LPTIM);

	LL_LPTIM_DisableIT_CC1(LPTIM);
	/* wait for the DIER write to complete (DIEROK handshake) */
	while (LL_LPTIM_IsActiveFlag_DIEROK(LPTIM) == 0) {
	}
	LL_LPTIM_ClearFlag_DIEROK(LPTIM);
	LL_LPTIM_ClearFLAG_CC1(LPTIM);
#else
	/* LPTIM interrupt set-up before enabling */
	/* no Compare match Interrupt */
	LL_LPTIM_DisableIT_CMPM(LPTIM);
	LL_LPTIM_ClearFLAG_CMPM(LPTIM);
#endif

	/* Autoreload match Interrupt */
	LL_LPTIM_EnableIT_ARRM(LPTIM);
#ifdef CONFIG_SOC_SERIES_STM32U5X
	/* wait for the DIER write to complete (DIEROK handshake) */
	while (LL_LPTIM_IsActiveFlag_DIEROK(LPTIM) == 0) {
	}
	LL_LPTIM_ClearFlag_DIEROK(LPTIM);
#endif
	LL_LPTIM_ClearFLAG_ARRM(LPTIM);
	/* ARROK bit validates the write operation to ARR register */
	LL_LPTIM_EnableIT_ARROK(LPTIM);
	LL_LPTIM_ClearFlag_ARROK(LPTIM);

	accumulated_lptim_cnt = 0;

#ifndef CONFIG_SOC_SERIES_STM32U5X
	/* Enable the LPTIM counter */
	LL_LPTIM_Enable(LPTIM);
#endif

	/* Set the Autoreload value once the timer is enabled */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* LPTIM is triggered on a LPTIM_TIMEBASE period */
		lptim_set_autoreload(LPTIM_TIMEBASE);
	} else {
		/* LPTIM is triggered on a Tick period */
		lptim_set_autoreload(COUNT_PER_TICK - 1);
	}

	/* Start the LPTIM counter in continuous mode */
	LL_LPTIM_StartCounter(LPTIM, LL_LPTIM_OPERATING_MODE_CONTINUOUS);

#ifdef CONFIG_DEBUG
	/* stop LPTIM during DEBUG */
#if defined(LL_DBGMCU_APB1_GRP1_LPTIM1_STOP)
	LL_DBGMCU_APB1_GRP1_FreezePeriph(LL_DBGMCU_APB1_GRP1_LPTIM1_STOP);
#elif defined(LL_DBGMCU_APB3_GRP1_LPTIM1_STOP)
	LL_DBGMCU_APB3_GRP1_FreezePeriph(LL_DBGMCU_APB3_GRP1_LPTIM1_STOP);
#endif

#endif

	return 0;
}
|
|
|
|
|
|
|
|
/* Register the timer driver init; PRE_KERNEL_2 so clocks/IRQs exist before
 * the kernel starts scheduling.
 */
SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
|