drivers: cache: nrf: Optimize operation
Apply changes which significantly speed up cache operations.

Remove k_busy_wait() from the function that polls for cache busyness, and remove the spinlock from the per-line cache operation. Taking and releasing the lock takes time; it is faster to check whether LINEADDR changed after performing the operation. A changed LINEADDR indicates that the current context was preempted by another cache operation, and when such a state is detected the current operation is repeated.

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
parent b83a11281d
commit 9f6567b87a

drivers/cache/cache_nrf.c (vendored): 44 changed lines
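The pattern described in the commit message can be sketched in isolation. The snippet below is not the driver code: cache_regs, wait_while_busy() and clean_line() are hypothetical stand-ins for the nRF CACHE registers and HAL calls, used only to show the shape of the lock-free retry loop (program LINEADDR, trigger the task, then re-read LINEADDR and repeat if another context overwrote it).

#include <stdint.h>

/* Hypothetical register block standing in for the nRF CACHE peripheral. */
struct cache_regs {
	volatile uint32_t lineaddr;          /* LINEADDR: target cache line      */
	volatile uint32_t busy;              /* non-zero while an op is running  */
	volatile uint32_t tasks_cleanline;   /* write 1 to start a clean-line op */
};

/* Plain busy-poll, with no k_busy_wait() back-off between reads. */
static inline void wait_while_busy(struct cache_regs *regs)
{
	while (regs->busy) {
	}
}

/* Clean one line without taking a lock.  If a preempting context reprograms
 * LINEADDR before our task takes effect, the readback in the loop condition
 * no longer matches and the whole sequence is simply repeated.
 */
static void clean_line(struct cache_regs *regs, uintptr_t line_addr)
{
	do {
		wait_while_busy(regs);

		regs->lineaddr = (uint32_t)line_addr;
		regs->tasks_cleanline = 1;
	} while (regs->lineaddr != (uint32_t)line_addr);
}

int main(void)
{
	static struct cache_regs regs;       /* zero-initialized: idle */

	/* Single context: the readback matches and the loop runs exactly once. */
	clean_line(&regs, 0x20000040u);
	return 0;
}

In the driver itself this per-line retry is paired with a single wait_for_cache() after the whole range loop, as the last hunk below shows.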
@@ -17,7 +17,6 @@ LOG_MODULE_REGISTER(cache_nrfx, CONFIG_CACHE_LOG_LEVEL);
 #define CACHE_LINE_SIZE 32
-#define CACHE_BUSY_RETRY_INTERVAL_US 10
 
 static struct k_spinlock lock;
 
 enum k_nrf_cache_op {
 	/*
@@ -55,7 +54,6 @@ static inline bool is_cache_busy(NRF_CACHE_Type *cache)
 static inline void wait_for_cache(NRF_CACHE_Type *cache)
 {
 	while (is_cache_busy(cache)) {
-		k_busy_wait(CACHE_BUSY_RETRY_INTERVAL_US);
 	}
 }
 
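For reference, wait_for_cache() as it reads after this hunk, reassembled from the new side of the diff. The loop body is intentionally empty: is_cache_busy() is re-evaluated on every iteration, and with the 10 us k_busy_wait() back-off gone the caller resumes as soon as the busy status clears.

static inline void wait_for_cache(NRF_CACHE_Type *cache)
{
	while (is_cache_busy(cache)) {
	}
}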
@@ -99,33 +97,33 @@ static inline int _cache_all(NRF_CACHE_Type *cache, enum k_nrf_cache_op op)
 
 static inline void _cache_line(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, uintptr_t line_addr)
 {
-	wait_for_cache(cache);
+	do {
+		wait_for_cache(cache);
 
-	nrf_cache_lineaddr_set(cache, line_addr);
+		nrf_cache_lineaddr_set(cache, line_addr);
 
-	switch (op) {
+		switch (op) {
 
 #if NRF_CACHE_HAS_TASK_CLEAN
-	case K_NRF_CACHE_CLEAN:
-		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE);
-		break;
+		case K_NRF_CACHE_CLEAN:
+			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE);
+			break;
 #endif
 
-	case K_NRF_CACHE_INVD:
-		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE);
-		break;
+		case K_NRF_CACHE_INVD:
+			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE);
+			break;
 
 #if NRF_CACHE_HAS_TASK_FLUSH
-	case K_NRF_CACHE_FLUSH:
-		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE);
-		break;
+		case K_NRF_CACHE_FLUSH:
+			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE);
+			break;
 #endif
 
-	default:
-		break;
-	}
-
-	wait_for_cache(cache);
+		default:
+			break;
+		}
+	} while (nrf_cache_lineaddr_get(cache) != line_addr);
 }
 
 static inline int _cache_range(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
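Reassembled from the new side of the hunk above, the per-line helper now reads as follows; the comments are added here to spell out the retry condition and are not part of the patch.

static inline void _cache_line(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, uintptr_t line_addr)
{
	do {
		/* Wait for any operation already in flight. */
		wait_for_cache(cache);

		/* Program the target line and start the requested task. */
		nrf_cache_lineaddr_set(cache, line_addr);

		switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
		case K_NRF_CACHE_CLEAN:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE);
			break;
#endif

		case K_NRF_CACHE_INVD:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE);
			break;

#if NRF_CACHE_HAS_TASK_FLUSH
		case K_NRF_CACHE_FLUSH:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE);
			break;
#endif

		default:
			break;
		}

		/* If LINEADDR no longer holds our address, this context was
		 * preempted by another cache operation; repeat the operation
		 * in that case.
		 */
	} while (nrf_cache_lineaddr_get(cache) != line_addr);
}

The trailing wait_for_cache() that used to close the function is gone; the final wait now happens once per range in _cache_range(), as the next hunk shows.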
@@ -150,16 +148,12 @@ static inline int _cache_range(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, vo
 	line_addr &= ~(CACHE_LINE_SIZE - 1);
 
 	do {
-		k_spinlock_key_t key = k_spin_lock(&lock);
-
 		_cache_line(cache, op, line_addr);
-
-		k_spin_unlock(&lock, key);
 
 		line_addr += CACHE_LINE_SIZE;
 	} while (line_addr < end_addr);
 
 	wait_for_cache(cache);
 
 	return 0;
 }
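Putting the last hunk together, the range walk in _cache_range() ends up as the loop below (only the tail of the function is shown, as in the hunk): no lock is taken per line, and a single wait_for_cache() after the loop keeps the function from returning while the last line operation is still in flight.

	line_addr &= ~(CACHE_LINE_SIZE - 1);

	do {
		_cache_line(cache, op, line_addr);

		line_addr += CACHE_LINE_SIZE;
	} while (line_addr < end_addr);

	wait_for_cache(cache);

	return 0;
}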