timer: use _sys_clock_tick_announce() in drivers
Replace duplicated code in the timer drivers; this also allows further cleanup by possibly consolidating the micro/nano code that is currently guarded by preprocessor conditionals.

Change-Id: I9aa9966c581244646b6ea317ef8b51fef9054dd4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
commit da26208623
parent 153503d766
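For context, a minimal sketch of the kind of single helper the drivers are switched to, assuming a microkernel build announces the tick by pushing TICK_EVENT onto the kernel command stack and a nanokernel build drains the expired nano timer list, exactly as the per-driver code removed below did. The actual definition and header location are not shown in this diff; this is illustrative only, not the real Zephyr source.

/* Illustrative sketch only -- not the actual definition.
 * It mirrors the duplicated driver code removed in the hunks below:
 * a microkernel build announces the tick by pushing TICK_EVENT onto
 * the kernel command stack; a nanokernel build bumps the tick count
 * and hands every expired nano timer back to its LIFO.
 */
#ifdef CONFIG_MICROKERNEL
#define _sys_clock_tick_announce() \
	nano_isr_stack_push(&_k_command_stack, TICK_EVENT)
#else
static inline void _sys_clock_tick_announce(void)
{
	_nano_ticks++;

	if (_nano_timer_list) {
		_nano_timer_list->ticks--;

		/* release every timer that just reached zero ticks */
		while (_nano_timer_list && (!_nano_timer_list->ticks)) {
			struct nano_timer *expired = _nano_timer_list;

			_nano_timer_list = expired->link;
			nano_isr_lifo_put(&expired->lifo, expired->userData);
		}
	}
}
#endif /* CONFIG_MICROKERNEL */

With one helper like this, each driver's interrupt and idle-exit paths need only the single call that appears as the added lines below, and the remaining CONFIG_MICROKERNEL/CONFIG_NANOKERNEL conditionals in the drivers can be collapsed in a later cleanup.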
@@ -150,19 +150,7 @@ void _timer_int_handler(void *unused)
 
 	clock_accumulated_count += sys_clock_hw_cycles_per_tick;
 
-	_nano_ticks++;
-
-	if (_nano_timer_list) {
-		_nano_timer_list->ticks--;
-
-		while (_nano_timer_list && (!_nano_timer_list->ticks)) {
-			struct nano_timer *expired = _nano_timer_list;
-			struct nano_lifo *lifo = &expired->lifo;
-
-			_nano_timer_list = expired->link;
-			nano_isr_lifo_put(lifo, expired->userData);
-		}
-	}
+	_sys_clock_tick_announce();
 }
 
 /*******************************************************************************
@@ -317,7 +317,7 @@ void _TIMER_INT_HANDLER(void *unused)
 		idle_mode = IDLE_NOT_TICKLESS;
 		_sys_idle_elapsed_ticks =
 			idle_original_ticks + 1; /* actual # of idle ticks */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		_sys_clock_tick_announce();
 	} else {
 		/*
 		 * Increment the tick because _timer_idle_exit does not
@@ -335,7 +335,7 @@ void _TIMER_INT_HANDLER(void *unused)
 		 */
 
 		if (_sys_idle_elapsed_ticks == 1) {
-			nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+			_sys_clock_tick_announce();
 		}
 	}
 
@@ -348,7 +348,7 @@ void _TIMER_INT_HANDLER(void *unused)
 	 */
 	clock_accumulated_count += sys_clock_hw_cycles_per_tick;
 
-	nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+	_sys_clock_tick_announce();
 #endif /* CONFIG_TICKLESS_IDLE */
 
 	numIdleTicks = _NanoIdleValGet(); /* get # of idle ticks requested */
@@ -371,26 +371,12 @@ void _TIMER_INT_HANDLER(void *unused)
 	/* accumulate total counter value */
 	clock_accumulated_count += sys_clock_hw_cycles_per_tick;
 
-#ifdef CONFIG_MICROKERNEL
 	/*
 	 * one more tick has occurred -- don't need to do anything special since
 	 * timer is already configured to interrupt on the following tick
 	 */
-	nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
-#else
-	_nano_ticks++;
+	_sys_clock_tick_announce();
 
-	if (_nano_timer_list) {
-		_nano_timer_list->ticks--;
-
-		while (_nano_timer_list && (!_nano_timer_list->ticks)) {
-			struct nano_timer *expired = _nano_timer_list;
-			struct nano_lifo *chan = &expired->lifo;
-			_nano_timer_list = expired->link;
-			nano_isr_lifo_put(chan, expired->userData);
-		}
-	}
-#endif /* CONFIG_MICROKERNEL */
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
 
 extern void _ExcExit(void);
@@ -587,7 +573,7 @@ void _timer_idle_exit(void)
 		 * so _sys_idle_elapsed_ticks is adjusted to account for it.
 		 */
 		_sys_idle_elapsed_ticks = idle_original_ticks - 1;
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		_sys_clock_tick_announce();
 	} else {
 		uint32_t elapsed;   /* elapsed "counter time" */
 		uint32_t remaining; /* remaining "counter time" */
@@ -618,8 +604,7 @@ void _timer_idle_exit(void)
 		_sys_idle_elapsed_ticks = elapsed / default_load_value;
 
 		if (_sys_idle_elapsed_ticks) {
-			/* Announce elapsed ticks to the microkernel */
-			nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+			_sys_clock_tick_announce();
 		}
 	}
 
@@ -298,7 +298,7 @@ void _timer_int_handler(void *unused)
 	 * timer is already configured to interrupt on the following tick
 	 */
 
-	nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+	_sys_clock_tick_announce();
 
 #else
 
@@ -335,24 +335,13 @@ void _timer_int_handler(void *unused)
 		 */
 
 		if (_sys_idle_elapsed_ticks == 1) {
-			nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+			_sys_clock_tick_announce();
 		}
 
 #endif /* !TIMER_SUPPORTS_TICKLESS */
 
 #else
-	_nano_ticks++; /* increment nanokernel ticks var */
-
-	if (_nano_timer_list) {
-		_nano_timer_list->ticks--;
-
-		while (_nano_timer_list && (!_nano_timer_list->ticks)) {
-			struct nano_timer *expired = _nano_timer_list;
-			struct nano_lifo *chan = &expired->lifo;
-			_nano_timer_list = expired->link;
-			nano_isr_lifo_put(chan, expired->userData);
-		}
-	}
+	_sys_clock_tick_announce();
 #endif /* CONFIG_MICROKERNEL */
 }
 
@@ -440,7 +429,7 @@ void _timer_idle_exit(void)
 		 * is
 		 * serviced.
 		 */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		_sys_clock_tick_announce();
 
 		/* timer interrupt handler reprograms the timer for the next
 		 * tick */
@@ -491,7 +480,7 @@ void _timer_idle_exit(void)
 
 	if (_sys_idle_elapsed_ticks) {
 		/* Announce elapsed ticks to the microkernel */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		_sys_clock_tick_announce();
 	}
 
 	/*
@@ -331,7 +331,7 @@ void _timer_int_handler(void *unused /* parameter is not used */
 		 */
 
 		if (_sys_idle_elapsed_ticks == 1) {
-			nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+			__sys_clock_tick_announce();
 		}
 
 #else
@@ -339,26 +339,13 @@ void _timer_int_handler(void *unused /* parameter is not used */
 	clock_accumulated_count += counterLoadVal;
 
 #if defined(CONFIG_MICROKERNEL)
 	/* announce tick into the microkernel */
-	nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+	__sys_clock_tick_announce();
 #endif
 
 #endif /*TIMER_SUPPORTS_TICKLESS*/
 
 #if defined(CONFIG_NANOKERNEL)
-
-	_nano_ticks++; /* increment nanokernel ticks var */
-
-	if (_nano_timer_list != NULL) {
-		_nano_timer_list->ticks--;
-
-		while ((_nano_timer_list != NULL) && (!_nano_timer_list->ticks)) {
-			struct nano_timer *expired = _nano_timer_list;
-			struct nano_lifo *chan = &expired->lifo;
-			_nano_timer_list = expired->link;
-			nano_isr_lifo_put(chan, expired->userData);
-		}
-	}
+	__sys_clock_tick_announce();
 #endif /* CONFIG_NANOKERNEL */
 
 #ifdef LOAPIC_TIMER_PERIODIC_WORKAROUND
@@ -528,7 +515,7 @@ void _timer_idle_exit(void)
 		 * is
 		 * serviced.
 		 */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		__sys_clock_tick_announce();
 	} else {
 		uint32_t elapsed;   /* elapsed "counter time" */
 		uint32_t remaining; /* remaining "counter time" */
@@ -551,8 +538,7 @@ void _timer_idle_exit(void)
 	_sys_idle_elapsed_ticks = elapsed / counterLoadVal;
 
 	if (_sys_idle_elapsed_ticks) {
-		/* Announce elapsed ticks to the microkernel */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		__sys_clock_tick_announce();
 	}
 	}
 	_loApicTimerStart();
@@ -285,7 +285,7 @@ void _timer_int_handler(void *unusedArg /* not used */
 		 */
 
 		if (_sys_idle_elapsed_ticks == 1) {
-			nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+			_sys_clock_tick_announce();
 		}
 
 		/* accumulate total counter value */
@@ -293,8 +293,7 @@ void _timer_int_handler(void *unusedArg /* not used */
 
 #else
 #if defined(CONFIG_MICROKERNEL)
-	/* announce tick into the microkernel */
-	nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+	_sys_clock_tick_announce();
 #endif
 
 	/* accumulate total counter value */
@@ -316,18 +315,7 @@ void _timer_int_handler(void *unusedArg /* not used */
 	}
 
 #if defined(CONFIG_NANOKERNEL)
-	_nano_ticks++; /* increment nanokernel ticks var */
-
-	if (_nano_timer_list) {
-		_nano_timer_list->ticks--;
-
-		while (_nano_timer_list && (!_nano_timer_list->ticks)) {
-			struct nano_timer *expired = _nano_timer_list;
-			struct nano_lifo *chan = &expired->lifo;
-			_nano_timer_list = expired->link;
-			nano_isr_lifo_put(chan, expired->userData);
-		}
-	}
+	_sys_clock_tick_announce();
 #endif /* CONFIG_NANOKERNEL */
 }
 
@@ -464,7 +452,7 @@ void _timer_idle_exit(void)
 		 * is
 		 * serviced.
 		 */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		_sys_clock_tick_announce();
 	} else {
 		uint16_t elapsed;   /* elapsed "counter time" */
 		uint16_t remaining; /* remaing "counter time" */
@@ -487,7 +475,7 @@ void _timer_idle_exit(void)
 
 	if (_sys_idle_elapsed_ticks) {
 		/* Announce elapsed ticks to the microkernel */
-		nano_isr_stack_push(&_k_command_stack, TICK_EVENT);
+		_sys_clock_tick_announce();
 	}
 
 }