kernel: Checkpatch fixups

I was pretty careful, but these snuck in.  Most of them are due to
overbroad string replacements in comments.  The pull request is very
large, and I'm too lazy to find exactly where to back-merge all of
these.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross, 2018-09-29 07:34:55 -07:00 (committed by Anas Nashif)
parent af7bf89ed2
commit cfe62038d2
10 changed files with 43 additions and 31 deletions


@@ -216,7 +216,7 @@ void _timer_int_handler(void *unused)
z_clock_announce(_sys_idle_elapsed_ticks);
-/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+/* z_clock_announce() could cause new programming */
if (!programmed_ticks && _sys_clock_always_on) {
z_tick_set(z_clock_uptime());
program_max_cycles();
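
For context, the "could cause new programming" note refers to timeout handlers run from inside z_clock_announce(): a handler that rearms itself reprograms the timer before the ISR tail executes, which is why programmed_ticks is re-checked after the announce. A minimal sketch using the public timer API (hypothetical example, not part of this patch):

#include <kernel.h>

/* Hypothetical expiry handler: rearming from inside the handler
 * reprograms the timer while the announce is still in progress.
 */
static void expiry(struct k_timer *t)
{
	k_timer_start(t, K_MSEC(10), 0);	/* rearm: new programming */
}

K_TIMER_DEFINE(self_rearming_timer, expiry, NULL);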


@@ -270,7 +270,7 @@ void _timer_int_handler(void *unused)
z_clock_announce(_sys_idle_elapsed_ticks);
-/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+/* z_clock_announce() could cause new programming */
if (!idle_original_ticks && _sys_clock_always_on) {
z_tick_set(z_clock_uptime());
/* clear overflow tracking flag as it is accounted */
@@ -754,15 +754,16 @@ return (u32_t) get_elapsed_count();
do {
cac = clock_accumulated_count;
#ifdef CONFIG_TICKLESS_IDLE
-/* When we leave a tickless period the reload value of the timer
- * can be set to a remaining value to wait until end of tick.
- * (see z_clock_idle_exit). The remaining value is always smaller
- * than default_load_value. In this case the time elapsed until
- * the timer restart was not yet added to
- * clock_accumulated_count. To retrieve a correct cycle count
- * we must therefore consider the number of cycle since current
- * tick period start and not only the cycle number since
- * the timer restart.
+/* When we leave a tickless period the reload value of
+ * the timer can be set to a remaining value to wait
+ * until end of tick. (see z_clock_idle_exit). The
+ * remaining value is always smaller than
+ * default_load_value. In this case the time elapsed
+ * until the timer restart was not yet added to
+ * clock_accumulated_count. To retrieve a correct
+ * cycle count we must therefore consider the number
+ * of cycle since current tick period start and not
+ * only the cycle number since the timer restart.
*/
if (SysTick->LOAD < default_load_value) {
count = default_load_value;
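
As a sketch of the arithmetic this comment describes (hypothetical helper; load and val stand in for SysTick->LOAD and SysTick->VAL, dflt for default_load_value):

/* SysTick counts down, so (load - val) is the cycles elapsed since
 * the restart. When the period was shortened on tickless exit
 * (load < dflt), the (dflt - load) cycles consumed before the restart
 * have not been folded into clock_accumulated_count yet and must be
 * added back, giving dflt - val in total.
 */
static u32_t cycles_since_tick_start(u32_t load, u32_t val, u32_t dflt)
{
	if (load < dflt) {
		return (dflt - load) + (load - val);
	}
	return load - val;
}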


@@ -301,7 +301,7 @@ void _timer_int_handler(void *unused)
programmed_ticks = 0;
z_clock_announce(_sys_idle_elapsed_ticks);
-/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+/* z_clock_announce() could cause new programming */
if (!programmed_ticks && _sys_clock_always_on) {
z_tick_set(z_clock_uptime());
program_max_cycles();


@@ -320,7 +320,7 @@ void _timer_int_handler(void *unused /* parameter is not used */
z_clock_announce(_sys_idle_elapsed_ticks);
-/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+/* z_clock_announce() could cause new programming */
if (!programmed_full_ticks && _sys_clock_always_on) {
z_tick_set(z_clock_uptime());
program_max_cycles();
@@ -332,16 +332,20 @@ void _timer_int_handler(void *unused /* parameter is not used */
u32_t cycles;
/*
- * The timer fired unexpectedly. This is due to one of two cases:
+ * The timer fired unexpectedly. This is due
+ * to one of two cases:
* 1. Entering tickless idle straddled a tick.
* 2. Leaving tickless idle straddled the final tick.
- * Due to the timer reprogramming in z_clock_idle_exit(), case #2
- * can be handled as a fall-through.
+ * Due to the timer reprogramming in
+ * z_clock_idle_exit(), case #2 can be handled
+ * as a fall-through.
*
- * NOTE: Although the cycle count is supposed to stop decrementing
- * once it hits zero in one-shot mode, not all targets implement
- * this properly (and continue to decrement). Thus, we have to
- * perform a second comparison to check for wrap-around.
+ * NOTE: Although the cycle count is supposed
+ * to stop decrementing once it hits zero in
+ * one-shot mode, not all targets implement
+ * this properly (and continue to decrement).
+ * Thus, we have to perform a second
+ * comparison to check for wrap-around.
*/
cycles = current_count_register_get();
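
The wrap-around test described in the NOTE can be sketched as follows (hypothetical helper and parameter names, not from this patch):

#include <stdbool.h>

/* A down-counter that fails to stop in one-shot mode wraps from 0 to
 * 0xFFFFFFFF and keeps counting down, so any reading above the value
 * the timer was started with also means it already expired.
 */
static bool one_shot_expired(u32_t cycles, u32_t programmed_cycles)
{
	return (cycles == 0) || (cycles > programmed_cycles);
}
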
@@ -604,7 +608,7 @@ void z_clock_idle_exit(void)
*
* NOTE #1: In the case of a straddled tick, the '_sys_idle_elapsed_ticks'
* calculation below may result in either 0 or 1. If 1, then this may
- * result in a harmless extra call to z_clock_announce(_sys_idle_elapsed_ticks).
+ * result in a harmless extra call to z_clock_announce().
*
* NOTE #2: In the case of a straddled tick, it is assumed that when the
* timer is reprogrammed, it will be reprogrammed with a cycle count


@@ -209,7 +209,8 @@ void _timer_idle_enter(s32_t sys_ticks)
/* If ticks is 0, the RTC interrupt handler will be set pending
* immediately, meaning that we will not go to sleep.
*/
-rtc_compare_set(rtc_past + (sys_ticks * sys_clock_hw_cycles_per_tick()));
+rtc_compare_set(rtc_past +
+		(sys_ticks * sys_clock_hw_cycles_per_tick()));
#endif
}
@@ -435,7 +436,7 @@ void z_clock_idle_exit(void)
rtc_announce_set_next();
/* After exiting idle, the kernel no longer expects more than one sys
- * ticks to have passed when z_clock_announce(_sys_idle_elapsed_ticks) is called.
+ * ticks to have passed when z_clock_announce() is called.
*/
expected_sys_ticks = 1;
#endif
@@ -493,7 +494,7 @@ void rtc1_nrf5_isr(void *arg)
/* Announce elapsed of _sys_idle_elapsed_ticks systicks */
z_clock_announce(_sys_idle_elapsed_ticks);
-/* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */
+/* z_clock_announce() could cause new programming */
if (!expected_sys_ticks && _sys_clock_always_on) {
program_max_cycles();
}
@@ -530,7 +531,8 @@ int z_clock_driver_init(struct device *device)
/* TODO: replace with counter driver to access RTC */
SYS_CLOCK_RTC->PRESCALER = 0;
-nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX, sys_clock_hw_cycles_per_tick());
+nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX,
+	       sys_clock_hw_cycles_per_tick());
nrf_rtc_event_enable(SYS_CLOCK_RTC, RTC_EVTENSET_COMPARE0_Msk);
nrf_rtc_int_enable(SYS_CLOCK_RTC, RTC_INTENSET_COMPARE0_Msk);


@@ -1218,6 +1218,10 @@ __syscall void k_thread_name_set(k_tid_t thread_id, const char *value);
*/
__syscall const char *k_thread_name_get(k_tid_t thread_id);
/**
* @}
*/
/**
* @addtogroup clock_apis
* @{


@@ -89,9 +89,9 @@ static inline int sys_clock_hw_cycles_per_tick(void)
*/
#if !defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
-#if (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
+#if (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
#define _NEED_PRECISE_TICK_MS_CONVERSION
-#elif (MSEC_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
+#elif (MSEC_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0
#define _NON_OPTIMIZED_TICKS_PER_SEC
#endif
#endif
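
A worked example of what these conditions guard, with hypothetical configuration values:

/* Worked example (hypothetical configuration values):
 *
 *   CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC = 32768  (32 KiHz RTC)
 *   CONFIG_SYS_CLOCK_TICKS_PER_SEC     = 100
 *
 *   32768 % 100 = 68 != 0, so _NEED_PRECISE_TICK_MS_CONVERSION is
 *   defined: a tick is 327.68 cycles, and naive integer
 *   cycles-per-tick math would accumulate drift.
 *
 *   With an evenly dividing clock but CONFIG_SYS_CLOCK_TICKS_PER_SEC
 *   = 300, 1000 (MSEC_PER_SEC) % 300 = 100 != 0, so
 *   _NON_OPTIMIZED_TICKS_PER_SEC is defined: a tick is not a whole
 *   number of milliseconds.
 */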


@@ -53,8 +53,8 @@ s32_t z_timeout_remaining(struct _timeout *timeout);
#else
/* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
-#define _init_thread_timeout(t) do{}while(0)
-#define _add_thread_timeout(th,to) do{}while(0 && (void*)to && (void*)th)
+#define _init_thread_timeout(t) do {} while (0)
+#define _add_thread_timeout(th, to) do {} while (0 && (void *)to && (void *)th)
#define _abort_thread_timeout(t) (0)
#define _get_next_timeout_expiry() (K_FOREVER)
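
The reshaped stubs keep checkpatch's preferred do {} while (0) spacing. The idiom itself matters because it turns an empty macro into a single statement, and the short-circuited condition keeps the arguments syntactically used. A minimal illustration (hypothetical caller, not from this patch):

/* Expands to one statement that requires a trailing semicolon; the
 * `0 &&` short-circuit means the casts are never evaluated, but they
 * still reference th/to, avoiding unused-variable warnings in
 * !CONFIG_SYS_CLOCK_EXISTS builds.
 */
#define _add_thread_timeout(th, to) do {} while (0 && (void *)to && (void *)th)

static void demo(int *th, int *to)
{
	if (to)
		_add_thread_timeout(th, to); /* one statement: safe as an
					      * unbraced if body
					      */
}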


@@ -362,7 +362,7 @@ void thread_producer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3)
void thread_consumer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3)
{
-producer_get_w_cxt_switch_tid->base.timeout.dticks =_EXPIRED;
+producer_get_w_cxt_switch_tid->base.timeout.dticks = _EXPIRED;
__read_swap_end_time_value = 1;
TIMING_INFO_PRE_READ();
__msg_q_get_w_cxt_start_time = TIMING_INFO_OS_GET_TIME();


@@ -124,7 +124,8 @@ void test_clock_cycle(void)
if (c1 > c0) {
/* delta cycle should be greater than 1 milli-second */
zassert_true((c1 - c0) >
-(sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC), NULL);
+(sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC),
+	     NULL);
/* delta NS should be greater than 1 milli-second */
zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS(c1 - c0) >
(NSEC_PER_SEC / MSEC_PER_SEC), NULL);
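
As a worked example of the thresholds being asserted (hypothetical 72 MHz system clock):

/* Worked example with a hypothetical 72 MHz system clock:
 *
 *   sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC = 72000000 / 1000
 *                                                = 72000 cycles
 *   NSEC_PER_SEC / MSEC_PER_SEC                  = 1000000 ns
 *
 * so both asserts pass only if at least one millisecond elapsed
 * between the two cycle-counter reads.
 */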