From 25173f71cda630d4fb0c860d4e45e6f93c0995dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Chru=C5=9Bci=C5=84ski?= Date: Wed, 10 Jan 2024 11:01:11 +0100 Subject: [PATCH] pm: device_runtime: Extend with synchronous runtime PM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In many cases suspending or resuming of a device is limited to just a few register writes. The current solution assumes that those operations may be blocking, asynchronous and take a lot of time. Due to this assumption, the runtime PM API cannot be effectively used from the interrupt context. Zephyr has a few driver APIs which can be used from an interrupt context, and the use of runtime PM is currently limited in those cases. This patch introduces a new type of PM device - synchronous PM. If a device is specified as capable of synchronous PM operations, then device runtime getting and putting is executed in a critical section. In that case, the runtime API can be used from an interrupt context. Additionally, this approach reduces the RAM needed for the PM device (104 -> 20 bytes of RAM on ARM Cortex-M). 
Signed-off-by: Krzysztof Chruściński --- drivers/power_domain/power_domain_gpio.c | 2 +- .../power_domain/power_domain_gpio_monitor.c | 2 +- include/zephyr/device.h | 16 +- include/zephyr/pm/device.h | 184 ++++++++--- kernel/include/kernel_offsets.h | 2 +- subsys/pm/device.c | 36 +-- subsys/pm/device_runtime.c | 285 ++++++++++++++---- 7 files changed, 410 insertions(+), 117 deletions(-) diff --git a/drivers/power_domain/power_domain_gpio.c b/drivers/power_domain/power_domain_gpio.c index 82d9a17ef9..c5517d42f1 100644 --- a/drivers/power_domain/power_domain_gpio.c +++ b/drivers/power_domain/power_domain_gpio.c @@ -37,7 +37,7 @@ static int pd_on_domain_visitor(const struct device *dev, void *context) struct pd_visitor_context *visitor_context = context; /* Only run action if the device is on the specified domain */ - if (!dev->pm || (dev->pm->domain != visitor_context->domain)) { + if (!dev->pm || (dev->pm_base->domain != visitor_context->domain)) { return 0; } diff --git a/drivers/power_domain/power_domain_gpio_monitor.c b/drivers/power_domain/power_domain_gpio_monitor.c index 40c03bb8dd..62dfd60855 100644 --- a/drivers/power_domain/power_domain_gpio_monitor.c +++ b/drivers/power_domain/power_domain_gpio_monitor.c @@ -35,7 +35,7 @@ static int pd_on_domain_visitor(const struct device *dev, void *context) struct pd_visitor_context *visitor_context = context; /* Only run action if the device is on the specified domain */ - if (!dev->pm || (dev->pm->domain != visitor_context->domain)) { + if (!dev->pm || (dev->pm_base->domain != visitor_context->domain)) { return 0; } diff --git a/include/zephyr/device.h b/include/zephyr/device.h index c55958924f..e581fe74d3 100644 --- a/include/zephyr/device.h +++ b/include/zephyr/device.h @@ -371,7 +371,9 @@ struct device_state { bool initialized : 1; }; +struct pm_device_base; struct pm_device; +struct pm_device_isr; #ifdef CONFIG_DEVICE_DEPS_DYNAMIC #define Z_DEVICE_DEPS_CONST @@ -409,7 +411,11 @@ struct device { * Reference to the 
device PM resources (only available if * @kconfig{CONFIG_PM_DEVICE} is enabled). */ - struct pm_device *pm; + union { + struct pm_device_base *pm_base; + struct pm_device *pm; + struct pm_device_isr *pm_isr; + }; #endif }; @@ -885,7 +891,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) * @brief Initializer for @ref device. * * @param name_ Name of the device. - * @param pm_ Reference to @ref pm_device (optional). + * @param pm_ Reference to @ref pm_device_base (optional). * @param data_ Reference to device data. * @param config_ Reference to device config. * @param api_ Reference to device API ops. @@ -900,7 +906,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) .state = (state_), \ .data = (data_), \ IF_ENABLED(CONFIG_DEVICE_DEPS, (.deps = (deps_),)) /**/ \ - IF_ENABLED(CONFIG_PM_DEVICE, (.pm = (pm_),)) /**/ \ + IF_ENABLED(CONFIG_PM_DEVICE, (.pm_base = (pm_),)) /**/ \ } /** @@ -919,7 +925,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) * software device). * @param dev_id Device identifier (used to name the defined @ref device). * @param name Name of the device. - * @param pm Reference to @ref pm_device associated with the device. + * @param pm Reference to @ref pm_device_base associated with the device. * (optional). * @param data Reference to device data. * @param config Reference to device config. @@ -991,7 +997,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) * @param dev_id Device identifier (used to name the defined @ref device). * @param name Name of the device. * @param init_fn Device init function. - * @param pm Reference to @ref pm_device associated with the device. + * @param pm Reference to @ref pm_device_base associated with the device. * (optional). * @param data Reference to device data. * @param config Reference to device config. 
diff --git a/include/zephyr/pm/device.h b/include/zephyr/pm/device.h index ab54fd677f..d34a84c391 100644 --- a/include/zephyr/pm/device.h +++ b/include/zephyr/pm/device.h @@ -50,6 +50,8 @@ enum pm_device_flag { PM_DEVICE_FLAG_PD, /** Indicates if device runtime PM should be automatically enabled */ PM_DEVICE_FLAG_RUNTIME_AUTO, + /** Indicates that device runtime PM supports suspending and resuming from any context. */ + PM_DEVICE_FLAG_ISR_SAFE, }; /** @endcond */ @@ -122,8 +124,37 @@ typedef bool (*pm_device_action_failed_cb_t)(const struct device *dev, /** * @brief Device PM info + * + * Structure holds fields which are common for two PM devices: generic and + * synchronous. + */ +struct pm_device_base { + /** Device PM status flags. */ + atomic_t flags; + /** Device power state */ + enum pm_device_state state; + /** Device PM action callback */ + pm_device_action_cb_t action_cb; +#if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__) + /** Device usage count */ + uint32_t usage; +#endif /* CONFIG_PM_DEVICE_RUNTIME */ +#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN + /** Power Domain it belongs */ + const struct device *domain; +#endif /* CONFIG_PM_DEVICE_POWER_DOMAIN */ +}; + +/** + * @brief Runtime PM info for device with generic PM. + * + * Generic PM involves suspending and resuming operations which can be blocking, + * long lasting or asynchronous. Runtime PM API is limited when used from + * interrupt context. */ struct pm_device { + /** Base info. 
*/ + struct pm_device_base base; #if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__) /** Pointer to the device */ const struct device *dev; @@ -131,23 +162,31 @@ struct pm_device { struct k_sem lock; /** Event var to listen to the sync request events */ struct k_event event; - /** Device usage count */ - uint32_t usage; /** Work object for asynchronous calls */ struct k_work_delayable work; #endif /* CONFIG_PM_DEVICE_RUNTIME */ -#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN - /** Power Domain it belongs */ - const struct device *domain; -#endif /* CONFIG_PM_DEVICE_POWER_DOMAIN */ - /* Device PM status flags. */ - atomic_t flags; - /** Device power state */ - enum pm_device_state state; - /** Device PM action callback */ - pm_device_action_cb_t action_cb; }; +/** + * @brief Runtime PM info for device with synchronous PM. + * + * Synchronous PM can be used with devices which suspend and resume operations can + * be performed in the critical section as they are short and non-blocking. + * Runtime PM API can be used from any context in that case. + */ +struct pm_device_isr { + /** Base info. */ + struct pm_device_base base; +#if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__) + /** Lock to synchronize the synchronous get/put operations */ + struct k_spinlock lock; +#endif +}; + +/* Base part must be the first element. */ +BUILD_ASSERT(offsetof(struct pm_device, base) == 0); +BUILD_ASSERT(offsetof(struct pm_device_isr, base) == 0); + /** @cond INTERNAL_HIDDEN */ #ifdef CONFIG_PM_DEVICE_RUNTIME @@ -167,7 +206,7 @@ struct pm_device { #endif /* CONFIG_PM_DEVICE_POWER_DOMAIN */ /** - * @brief Utility macro to initialize #pm_device flags + * @brief Utility macro to initialize #pm_device_base flags * * @param node_id Devicetree node for the initialized device (can be invalid). */ @@ -188,17 +227,34 @@ struct pm_device { * @note #DT_PROP_OR is used to retrieve the wakeup_source property because * it may not be defined on all devices. 
* - * @param obj Name of the #pm_device structure being initialized. + * @param obj Name of the #pm_device_base structure being initialized. + * @param node_id Devicetree node for the initialized device (can be invalid). + * @param pm_action_cb Device PM control callback function. + * @param _flags Additional flags passed to the structure. + */ +#define Z_PM_DEVICE_BASE_INIT(obj, node_id, pm_action_cb, _flags) \ + { \ + .action_cb = pm_action_cb, \ + .state = PM_DEVICE_STATE_ACTIVE, \ + .flags = ATOMIC_INIT(Z_PM_DEVICE_FLAGS(node_id) | (_flags)), \ + Z_PM_DEVICE_POWER_DOMAIN_INIT(node_id) \ + } + +/** + * @brief Utility macro to initialize #pm_device_rt. + * + * @note #DT_PROP_OR is used to retrieve the wakeup_source property because + * it may not be defined on all devices. + * + * @param obj Name of the #pm_device_base structure being initialized. * @param node_id Devicetree node for the initialized device (can be invalid). * @param pm_action_cb Device PM control callback function. */ -#define Z_PM_DEVICE_INIT(obj, node_id, pm_action_cb) \ - { \ - Z_PM_DEVICE_RUNTIME_INIT(obj) \ - .action_cb = pm_action_cb, \ - .state = PM_DEVICE_STATE_ACTIVE, \ - .flags = ATOMIC_INIT(Z_PM_DEVICE_FLAGS(node_id)), \ - Z_PM_DEVICE_POWER_DOMAIN_INIT(node_id) \ +#define Z_PM_DEVICE_INIT(obj, node_id, pm_action_cb, isr_safe) \ + { \ + .base = Z_PM_DEVICE_BASE_INIT(obj, node_id, pm_action_cb, \ + isr_safe ? BIT(PM_DEVICE_FLAG_ISR_SAFE) : 0), \ + COND_CODE_1(isr_safe, (), (Z_PM_DEVICE_RUNTIME_INIT(obj))) \ } /** @@ -231,21 +287,22 @@ struct pm_device { * @param dev_id Device id. * @param pm_action_cb PM control callback. 
*/ -#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb) \ - Z_PM_DEVICE_DEFINE_SLOT(dev_id); \ - static struct pm_device Z_PM_DEVICE_NAME(dev_id) = \ - Z_PM_DEVICE_INIT(Z_PM_DEVICE_NAME(dev_id), node_id, \ - pm_action_cb) +#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb, isr_safe) \ + Z_PM_DEVICE_DEFINE_SLOT(dev_id); \ + static struct COND_CODE_1(isr_safe, (pm_device_isr), (pm_device)) \ + Z_PM_DEVICE_NAME(dev_id) = \ + Z_PM_DEVICE_INIT(Z_PM_DEVICE_NAME(dev_id), node_id, \ + pm_action_cb, isr_safe) /** * Get a reference to the device PM resources. * * @param dev_id Device id. */ -#define Z_PM_DEVICE_GET(dev_id) (&Z_PM_DEVICE_NAME(dev_id)) +#define Z_PM_DEVICE_GET(dev_id) ((struct pm_device_base *)&Z_PM_DEVICE_NAME(dev_id)) #else -#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb) +#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb, isr_safe) #define Z_PM_DEVICE_GET(dev_id) NULL #endif /* CONFIG_PM_DEVICE */ @@ -262,8 +319,26 @@ struct pm_device { * @see #PM_DEVICE_DT_DEFINE, #PM_DEVICE_DT_INST_DEFINE */ #define PM_DEVICE_DEFINE(dev_id, pm_action_cb) \ - Z_PM_DEVICE_DEFINE(DT_INVALID_NODE, dev_id, pm_action_cb) + Z_PM_DEVICE_DEFINE(DT_INVALID_NODE, dev_id, pm_action_cb, 0) +/** + * Define device PM resources for the given device name. + * + * PM actions are synchronous and can be executed from any context. This approach + * can be used for cases where suspending and resuming is short as it is + * executed in the critical section. This mode requires less resources (~80 byte + * less RAM) and allows to use device runtime PM from any context (including + * interrupts). + * + * @note This macro is a no-op if @kconfig{CONFIG_PM_DEVICE} is not enabled. + * + * @param dev_id Device id. + * @param pm_action_cb PM control callback. 
+ * + * @see #PM_DEVICE_DT_DEFINE, #PM_DEVICE_DT_INST_DEFINE + */ +#define PM_DEVICE_ISR_SYNC_DEFINE(dev_id, pm_action_cb) \ + Z_PM_DEVICE_DEFINE(DT_INVALID_NODE, dev_id, pm_action_cb, 1) /** * Define device PM resources for the given node identifier. * @@ -274,9 +349,27 @@ struct pm_device { * * @see #PM_DEVICE_DT_INST_DEFINE, #PM_DEVICE_DEFINE */ -#define PM_DEVICE_DT_DEFINE(node_id, pm_action_cb) \ - Z_PM_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), \ - pm_action_cb) +#define PM_DEVICE_DT_DEFINE(node_id, pm_action_cb) \ + Z_PM_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), pm_action_cb, 0) + +/** + * Define device PM resources for the given node identifier. + * + * PM actions are synchronous and can be executed from any context. This approach + * can be used for cases where suspending and resuming is short as it is + * executed in the critical section. This mode requires less resources (~80 byte + * less RAM) and allows to use device runtime PM from any context (including + * interrupts). + * + * @note This macro is a no-op if @kconfig{CONFIG_PM_DEVICE} is not enabled. + * + * @param node_id Node identifier. + * @param pm_action_cb PM control callback. + * + * @see #PM_DEVICE_DT_INST_DEFINE, #PM_DEVICE_DEFINE + */ +#define PM_DEVICE_ISR_SAFE_DT_DEFINE(node_id, pm_action_cb) \ + Z_PM_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), pm_action_cb, 1) /** * Define device PM resources for the given instance. @@ -291,7 +384,28 @@ struct pm_device { #define PM_DEVICE_DT_INST_DEFINE(idx, pm_action_cb) \ Z_PM_DEVICE_DEFINE(DT_DRV_INST(idx), \ Z_DEVICE_DT_DEV_ID(DT_DRV_INST(idx)), \ - pm_action_cb) + pm_action_cb, 0) + +/** + * Define device PM resources for the given instance. + * + * PM actions are synchronous and can be executed from any context. This approach + * can be used for cases where suspending and resuming is short as it is + * executed in the critical section. 
This mode requires less resources (~80 byte + * less RAM) and allows to use device runtime PM from any context (including + * interrupts). + * + * @note This macro is a no-op if @kconfig{CONFIG_PM_DEVICE} is not enabled. + * + * @param idx Instance index. + * @param pm_action_cb PM control callback. + * + * @see #PM_DEVICE_DT_DEFINE, #PM_DEVICE_DEFINE + */ +#define PM_DEVICE_ISR_SAFE_DT_INST_DEFINE(idx, pm_action_cb) \ + Z_PM_DEVICE_DEFINE(DT_DRV_INST(idx), \ + Z_DEVICE_DT_DEV_ID(DT_DRV_INST(idx)), \ + pm_action_cb, 1) /** * @brief Obtain a reference to the device PM resources for the given device. @@ -393,7 +507,7 @@ int pm_device_state_get(const struct device *dev, */ static inline void pm_device_init_suspended(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; pm->state = PM_DEVICE_STATE_SUSPENDED; } @@ -413,7 +527,7 @@ static inline void pm_device_init_suspended(const struct device *dev) */ static inline void pm_device_init_off(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; pm->state = PM_DEVICE_STATE_OFF; } diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h index f7676438d9..5644dbb157 100644 --- a/kernel/include/kernel_offsets.h +++ b/kernel/include/kernel_offsets.h @@ -86,7 +86,7 @@ GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_PM_OFFSET, /* member offsets in the pm_device structure. 
Used in image post-processing */ GEN_ABSOLUTE_SYM(_PM_DEVICE_STRUCT_FLAGS_OFFSET, - offsetof(struct pm_device, flags)); + offsetof(struct pm_device_base, flags)); GEN_ABSOLUTE_SYM(_PM_DEVICE_FLAG_PD, PM_DEVICE_FLAG_PD); diff --git a/subsys/pm/device.c b/subsys/pm/device.c index 15e8085773..ca3816fa93 100644 --- a/subsys/pm/device.c +++ b/subsys/pm/device.c @@ -42,7 +42,7 @@ const char *pm_device_state_str(enum pm_device_state state) int pm_device_action_run(const struct device *dev, enum pm_device_action action) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; int ret; if (pm == NULL) { @@ -139,13 +139,13 @@ static int power_domain_add_or_remove(const struct device *dev, while (rv[i] != Z_DEVICE_DEPS_ENDS) { if (add == false) { if (rv[i] == dev_handle) { - dev->pm->domain = NULL; + dev->pm_base->domain = NULL; rv[i] = DEVICE_HANDLE_NULL; return 0; } } else { if (rv[i] == DEVICE_HANDLE_NULL) { - dev->pm->domain = domain; + dev->pm_base->domain = domain; rv[i] = dev_handle; return 0; } @@ -212,7 +212,7 @@ void pm_device_children_action_run(const struct device *dev, int pm_device_state_get(const struct device *dev, enum pm_device_state *state) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return -ENOSYS; @@ -231,7 +231,7 @@ bool pm_device_is_any_busy(void) devc = z_device_get_all_static(&devs); for (const struct device *dev = devs; dev < (devs + devc); dev++) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { continue; @@ -247,7 +247,7 @@ bool pm_device_is_any_busy(void) bool pm_device_is_busy(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -258,7 +258,7 @@ bool pm_device_is_busy(const struct device *dev) void pm_device_busy_set(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) 
{ return; @@ -269,7 +269,7 @@ void pm_device_busy_set(const struct device *dev) void pm_device_busy_clear(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return; @@ -281,7 +281,7 @@ void pm_device_busy_clear(const struct device *dev) bool pm_device_wakeup_enable(const struct device *dev, bool enable) { atomic_val_t flags, new_flags; - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -305,7 +305,7 @@ bool pm_device_wakeup_enable(const struct device *dev, bool enable) bool pm_device_wakeup_is_enabled(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -317,7 +317,7 @@ bool pm_device_wakeup_is_enabled(const struct device *dev) bool pm_device_wakeup_is_capable(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -329,7 +329,7 @@ bool pm_device_wakeup_is_capable(const struct device *dev) void pm_device_state_lock(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if ((pm != NULL) && !pm_device_runtime_is_enabled(dev)) { atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_STATE_LOCKED); @@ -338,7 +338,7 @@ void pm_device_state_lock(const struct device *dev) void pm_device_state_unlock(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm != NULL) { atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_STATE_LOCKED); @@ -347,7 +347,7 @@ void pm_device_state_unlock(const struct device *dev) bool pm_device_state_is_locked(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -360,7 +360,7 @@ bool pm_device_state_is_locked(const struct device *dev) bool pm_device_on_power_domain(const 
struct device *dev) { #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -375,14 +375,14 @@ bool pm_device_on_power_domain(const struct device *dev) bool pm_device_is_powered(const struct device *dev) { #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; /* If a device doesn't support PM or is not under a PM domain, * assume it is always powered on. */ return (pm == NULL) || (pm->domain == NULL) || - (pm->domain->pm->state == PM_DEVICE_STATE_ACTIVE); + (pm->domain->pm_base->state == PM_DEVICE_STATE_ACTIVE); #else ARG_UNUSED(dev); return true; @@ -392,7 +392,7 @@ bool pm_device_is_powered(const struct device *dev) int pm_device_driver_init(const struct device *dev, pm_device_action_cb_t action_cb) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; int rc = 0; /* Work only needs to be performed if the device is powered */ diff --git a/subsys/pm/device_runtime.c b/subsys/pm/device_runtime.c index b6d3091e71..37cd7b85ba 100644 --- a/subsys/pm/device_runtime.c +++ b/subsys/pm/device_runtime.c @@ -52,7 +52,7 @@ static int runtime_suspend(const struct device *dev, bool async, /* * Early return if device runtime is not enabled. 
*/ - if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { return 0; } @@ -65,30 +65,30 @@ static int runtime_suspend(const struct device *dev, bool async, } } - if (pm->usage == 0U) { + if (pm->base.usage == 0U) { LOG_WRN("Unbalanced suspend"); ret = -EALREADY; goto unlock; } - pm->usage--; - if (pm->usage > 0U) { + pm->base.usage--; + if (pm->base.usage > 0U) { goto unlock; } if (async) { /* queue suspend */ - pm->state = PM_DEVICE_STATE_SUSPENDING; + pm->base.state = PM_DEVICE_STATE_SUSPENDING; (void)k_work_schedule(&pm->work, delay); } else { /* suspend now */ - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); if (ret < 0) { - pm->usage++; + pm->base.usage++; goto unlock; } - pm->state = PM_DEVICE_STATE_SUSPENDED; + pm->base.state = PM_DEVICE_STATE_SUSPENDED; } unlock: @@ -105,16 +105,16 @@ static void runtime_suspend_work(struct k_work *work) struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work); - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); (void)k_sem_take(&pm->lock, K_FOREVER); if (ret < 0) { - pm->usage++; - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.usage++; + pm->base.state = PM_DEVICE_STATE_ACTIVE; } else { - pm->state = PM_DEVICE_STATE_SUSPENDED; + pm->base.state = PM_DEVICE_STATE_SUSPENDED; } - k_event_set(&pm->event, BIT(pm->state)); + k_event_set(&pm->event, BIT(pm->base.state)); k_sem_give(&pm->lock); /* @@ -122,13 +122,47 @@ static void runtime_suspend_work(struct k_work *work) * finishes its operation */ if ((ret == 0) && - atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_PD_CLAIMED)) { - (void)pm_device_runtime_put(PM_DOMAIN(pm)); + atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) { + 
(void)pm_device_runtime_put(PM_DOMAIN(&pm->base)); } __ASSERT(ret == 0, "Could not suspend device (%d)", ret); } +static int get_sync_locked(const struct device *dev) +{ + int ret; + struct pm_device_isr *pm = dev->pm_isr; + uint32_t flags = pm->base.flags; + + if (pm->base.usage == 0) { + if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) { + const struct device *domain = PM_DOMAIN(&pm->base); + + if (domain->pm_base->flags & PM_DEVICE_FLAG_ISR_SAFE) { + ret = pm_device_runtime_get(domain); + if (ret < 0) { + return ret; + } + } else { + return -EWOULDBLOCK; + } + } + + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); + if (ret < 0) { + return ret; + } + pm->base.state = PM_DEVICE_STATE_ACTIVE; + } else { + ret = 0; + } + + pm->base.usage++; + + return ret; +} + int pm_device_runtime_get(const struct device *dev) { int ret = 0; @@ -143,10 +177,19 @@ int pm_device_runtime_get(const struct device *dev) /* * Early return if device runtime is not enabled. */ - if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { return 0; } + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + struct pm_device_isr *pm_sync = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm_sync->lock); + + ret = get_sync_locked(dev); + k_spin_unlock(&pm_sync->lock, k); + goto end; + } + if (!k_is_pre_kernel()) { ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER); if (ret < 0) { @@ -154,7 +197,7 @@ int pm_device_runtime_get(const struct device *dev) } } - if (k_is_in_isr() && (pm->state == PM_DEVICE_STATE_SUSPENDING)) { + if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) { ret = -EWOULDBLOCK; goto unlock; } @@ -163,31 +206,33 @@ int pm_device_runtime_get(const struct device *dev) * If the device is under a power domain, the domain has to be get * first. 
*/ - if (PM_DOMAIN(pm) != NULL) { - ret = pm_device_runtime_get(PM_DOMAIN(pm)); + const struct device *domain = PM_DOMAIN(&pm->base); + + if (domain != NULL) { + ret = pm_device_runtime_get(domain); if (ret != 0) { goto unlock; } /* Check if powering up this device failed */ - if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) { - (void)pm_device_runtime_put(PM_DOMAIN(pm)); + if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) { + (void)pm_device_runtime_put(domain); ret = -EAGAIN; goto unlock; } /* Power domain successfully claimed */ - atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_PD_CLAIMED); + atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED); } - pm->usage++; + pm->base.usage++; /* * Check if the device has a pending suspend operation (not started * yet) and cancel it. This way we avoid unnecessary operations because * the device is actually active. */ - if ((pm->state == PM_DEVICE_STATE_SUSPENDING) && + if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) && ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) { - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; goto unlock; } @@ -196,7 +241,7 @@ int pm_device_runtime_get(const struct device *dev) * If the device is already suspending there is * nothing else we can do but wait until it finishes. 
*/ - while (pm->state == PM_DEVICE_STATE_SUSPENDING) { + while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) { k_sem_give(&pm->lock); k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER); @@ -205,45 +250,95 @@ int pm_device_runtime_get(const struct device *dev) } } - if (pm->usage > 1U) { + if (pm->base.usage > 1U) { goto unlock; } - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); if (ret < 0) { - pm->usage--; + pm->base.usage--; goto unlock; } - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; unlock: if (!k_is_pre_kernel()) { k_sem_give(&pm->lock); } +end: SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret); return ret; } + +static int put_sync_locked(const struct device *dev) +{ + int ret; + struct pm_device_isr *pm = dev->pm_isr; + uint32_t flags = pm->base.flags; + + if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) { + return 0; + } + + if (pm->base.usage == 0U) { + return -EALREADY; + } + + pm->base.usage--; + if (pm->base.usage == 0U) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND); + if (ret < 0) { + return ret; + } + pm->base.state = PM_DEVICE_STATE_SUSPENDED; + + if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) { + const struct device *domain = PM_DOMAIN(&pm->base); + + if (domain->pm_base->flags & PM_DEVICE_FLAG_ISR_SAFE) { + ret = put_sync_locked(domain); + } else { + ret = -EWOULDBLOCK; + } + } + } else { + ret = 0; + } + + return ret; +} + int pm_device_runtime_put(const struct device *dev) { int ret; - if (dev->pm == NULL) { + if (dev->pm_base == NULL) { return 0; } SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev); - ret = runtime_suspend(dev, false, K_NO_WAIT); - /* - * Now put the domain - */ - if ((ret == 0) && - atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_PD_CLAIMED)) { - ret = pm_device_runtime_put(PM_DOMAIN(dev->pm)); + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + struct pm_device_isr 
*pm_sync = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm_sync->lock); + + ret = put_sync_locked(dev); + + k_spin_unlock(&pm_sync->lock, k); + } else { + ret = runtime_suspend(dev, false, K_NO_WAIT); + + /* + * Now put the domain + */ + if ((ret == 0) && + atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) { + ret = pm_device_runtime_put(PM_DOMAIN(dev->pm_base)); + } } SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret); @@ -254,12 +349,21 @@ int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay) { int ret; - if (dev->pm == NULL) { + if (dev->pm_base == NULL) { return 0; } SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay); - ret = runtime_suspend(dev, true, delay); + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + struct pm_device_isr *pm_sync = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm_sync->lock); + + ret = put_sync_locked(dev); + + k_spin_unlock(&pm_sync->lock, k); + } else { + ret = runtime_suspend(dev, true, delay); + } SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret); return ret; @@ -268,7 +372,7 @@ int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay) __boot_func int pm_device_runtime_auto_enable(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; /* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */ if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) { @@ -277,6 +381,36 @@ int pm_device_runtime_auto_enable(const struct device *dev) return pm_device_runtime_enable(dev); } +static int runtime_enable_sync(const struct device *dev) +{ + int ret; + struct pm_device_isr *pm = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm->lock); + + /* Because context is locked we can access flags directly. 
*/ + if (pm->base.flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + ret = 0; + goto unlock; + } + + if (pm->base.state == PM_DEVICE_STATE_ACTIVE) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND); + if (ret < 0) { + goto unlock; + } + + pm->base.state = PM_DEVICE_STATE_SUSPENDED; + } else { + ret = 0; + } + + pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED); + pm->base.usage = 0U; +unlock: + k_spin_unlock(&pm->lock, k); + return ret; +} + int pm_device_runtime_enable(const struct device *dev) { int ret = 0; @@ -293,11 +427,16 @@ int pm_device_runtime_enable(const struct device *dev) goto end; } + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + ret = runtime_enable_sync(dev); + goto end; + } + if (!k_is_pre_kernel()) { (void)k_sem_take(&pm->lock, K_FOREVER); } - if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { goto unlock; } @@ -307,17 +446,17 @@ int pm_device_runtime_enable(const struct device *dev) k_work_init_delayable(&pm->work, runtime_suspend_work); } - if (pm->state == PM_DEVICE_STATE_ACTIVE) { - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); + if (pm->base.state == PM_DEVICE_STATE_ACTIVE) { + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); if (ret < 0) { goto unlock; } - pm->state = PM_DEVICE_STATE_SUSPENDED; + pm->base.state = PM_DEVICE_STATE_SUSPENDED; } - pm->usage = 0U; + pm->base.usage = 0U; - atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); + atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); unlock: if (!k_is_pre_kernel()) { @@ -329,6 +468,34 @@ end: return ret; } +static int runtime_disable_sync(const struct device *dev) +{ + struct pm_device_isr *pm = dev->pm_isr; + int ret; + k_spinlock_key_t k = k_spin_lock(&pm->lock); + + if (!(pm->base.flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) { + ret = 0; + goto unlock; + } + + if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) { + ret 
= pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); + if (ret < 0) { + goto unlock; + } + + pm->base.state = PM_DEVICE_STATE_ACTIVE; + } else { + ret = 0; + } + + pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED); +unlock: + k_spin_unlock(&pm->lock, k); + return ret; +} + int pm_device_runtime_disable(const struct device *dev) { int ret = 0; @@ -340,23 +507,28 @@ int pm_device_runtime_disable(const struct device *dev) SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev); + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + ret = runtime_disable_sync(dev); + goto end; + } + if (!k_is_pre_kernel()) { (void)k_sem_take(&pm->lock, K_FOREVER); } - if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { goto unlock; } if (!k_is_pre_kernel()) { - if ((pm->state == PM_DEVICE_STATE_SUSPENDING) && + if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) && ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) { - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; goto clear_bit; } /* wait until possible async suspend is completed */ - while (pm->state == PM_DEVICE_STATE_SUSPENDING) { + while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) { k_sem_give(&pm->lock); k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER); @@ -366,23 +538,24 @@ int pm_device_runtime_disable(const struct device *dev) } /* wake up the device if suspended */ - if (pm->state == PM_DEVICE_STATE_SUSPENDED) { - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); + if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); if (ret < 0) { goto unlock; } - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; } clear_bit: - atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); + atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); unlock: if 
(!k_is_pre_kernel()) { k_sem_give(&pm->lock); } +end: SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret); return ret; @@ -390,7 +563,7 @@ unlock: bool pm_device_runtime_is_enabled(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); }