Rename _IntLatencyStart to _int_latency_start
Updating nano kernel functions to follow a consistent naming convention. Part of that process is the removal of camelCase naming in favor of the preferred underscore method.

Change accomplished with the following script:

#!/bin/bash
echo "Searching for ${1} to replace with ${2}"
find . -type f \( -iname \*.c -o -iname \*.h -o -iname \*.s \) \
    -not \( -path host/src/genIdt -prune \) \
    -not \( -path host/src/gen_tables -prune \) \
    -print | xargs sed -i "s/"${1}"/"${2}"/g"

Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
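For reference, the script takes the old symbol as its first argument and the new symbol as its second. A minimal usage sketch for the rename in this commit, assuming the script is saved as rename_symbol.sh (an illustrative name, not part of this commit):

# hypothetical invocation of the rename script shown above
./rename_symbol.sh _IntLatencyStart _int_latency_start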
commit 5b09162bb1
parent fe95d5c018
@@ -145,7 +145,7 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 	testl	$0x200, SP_ARG1(%esp)
 	jnz	skipIntDisable
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_IntLatencyStart
+	call	_int_latency_start
 #endif
 	cli
 BRANCH_LABEL(skipIntDisable)
@@ -71,7 +71,7 @@ entering and exiting a C interrupt handler.

 
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	GTEXT(_IntLatencyStart)
+	GTEXT(_int_latency_start)
 	GTEXT(_IntLatencyStop)
 #endif
 /*******************************************************************************
@@ -171,7 +171,7 @@ SECTION_FUNC(TEXT, _IntEnt)
  */
 
 	pushl	%eax
-	call	_IntLatencyStart
+	call	_int_latency_start
 	popl	%eax
 #endif
 
@@ -268,7 +268,7 @@ SECTION_FUNC(TEXT, _IntExit)
 
 	cli			/* disable interrupts */
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_IntLatencyStart
+	call	_int_latency_start
 #endif
 
 	/* determine whether exiting from a nested interrupt */
@@ -495,7 +495,7 @@ SECTION_FUNC(TEXT, irq_lock)
 	pushfl
 	cli
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_IntLatencyStart
+	call	_int_latency_start
 #endif
 	popl	%eax
 	ret
@@ -259,7 +259,7 @@ static inline void irq_unlock(unsigned int key) {}
 #else /* CONFIG_NO_ISRS */
 
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-void	_IntLatencyStart (void);
+void	_int_latency_start (void);
 void	_IntLatencyStop (void);
 #endif
 
@@ -313,7 +313,7 @@ static inline __attribute__((always_inline))
 		);
 
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	_IntLatencyStart ();
+	_int_latency_start ();
 #endif
 
 	return key;
@@ -330,7 +330,7 @@ __asm volatile unsigned int irq_lock_inline (void)
 	pushfl
 	cli
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
-	call	_IntLatencyStart
+	call	_int_latency_start
 #endif
 	popl	%eax
 	}
@@ -86,7 +86,7 @@ uint32_t _HwIntToCHandlerLatency = ULONG_MAX;
  *
  */
 
-void _IntLatencyStart(void)
+void _int_latency_start(void)
 {
 	/* when interrupts are not already locked, take time stamp */
 	if (!intLockedTimestamp && intLatencyBenchRdy) {
@@ -180,12 +180,12 @@ void intLatencyInit(void)
 	/* measure time to call intLatencyStart() and intLatencyStop
 	 * takes */
 	initialStartDelay = timer_read();
-	_IntLatencyStart();
+	_int_latency_start();
 	initialStartDelay =
 		timer_read() - initialStartDelay - timeToReadTime;
 
 	nestingDelay = timer_read();
-	_IntLatencyStart();
+	_int_latency_start();
 	nestingDelay = timer_read() - nestingDelay - timeToReadTime;
 
 	stopDelay = timer_read();