checkpatch: warning - line_spacing

Change-Id: I2276676142deea21cf8079449ce153f2fb887a8e
Signed-off-by: Dan Kalowsky <daniel.kalowsky@intel.com>
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Dan Kalowsky 2015-10-20 09:42:34 -07:00 committed by Anas Nashif
parent e45956c9a0
commit 890cc2f1ef
16 changed files with 54 additions and 26 deletions
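
The checkpatch type being fixed is LINE_SPACING, i.e. "Missing a blank line after declarations". A minimal sketch of the rule on a hypothetical function (not taken from any file in this commit); most hunks below simply insert that blank line after the local declarations:

/* warns: the first statement follows the declarations with no blank line */
static int sketch_sum_bad(int a, int b)
{
	int total = a;
	total += b;
	return total;
}

/* clean: declarations are separated from the code by a blank line */
static int sketch_sum_good(int a, int b)
{
	int total = a;

	total += b;
	return total;
}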


@@ -767,9 +767,9 @@ static inline void nanoArchInit(void)
{
extern void *__isr___SpuriousIntHandler;
extern void *_dummy_spurious_interrupt;
extern void _ExcEnt(void);
extern void *_dummy_exception_vector_stub;
extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
extern void _ExcEnt(void);
_nanokernel.nested = 0;


@@ -290,11 +290,11 @@ static int _i2c_dw_setup(struct device *dev)
{
struct i2c_dw_dev_config * const dw = dev->driver_data;
struct i2c_dw_rom_config const * const rom = dev->config->config_info;
volatile struct i2c_dw_registers * const regs =
(struct i2c_dw_registers *)rom->base_address;
uint32_t value = 0;
union ic_con_register ic_con;
int rc = DEV_OK;
volatile struct i2c_dw_registers * const regs =
(struct i2c_dw_registers *)rom->base_address;
ic_con.raw = 0;


@@ -462,6 +462,7 @@ IRQ_CONNECT_STATIC(spi_intel_irq_port_0, CONFIG_SPI_INTEL_PORT_0_IRQ,
void spi_config_0_irq(struct device *dev)
{
struct spi_intel_config *config = dev->config->config_info;
IRQ_CONFIG(spi_intel_irq_port_0, config->irq, 0);
}
@@ -505,6 +506,7 @@ IRQ_CONNECT_STATIC(spi_intel_irq_port_1, CONFIG_SPI_INTEL_PORT_1_IRQ,
void spi_config_1_irq(struct device *dev)
{
struct spi_intel_config *config = dev->config->config_info;
IRQ_CONFIG(spi_intel_irq_port_1, config->irq, 0);
}


@@ -391,6 +391,7 @@ static void sysTickTicklessIdleInit(void)
/* enable counter, disable interrupt and set clock src to system clock
*/
union __stcsr stcsr = {.bit = {1, 0, 1, 0, 0, 0} };
volatile uint32_t dummy; /* used to help determine the 'skew time' */
/* store the default reload value (which has already been set) */


@@ -283,13 +283,11 @@ union pci_dev {
/* offset 04: */
#ifdef _BIG_ENDIAN
uint32_t status : 16; /* device status */
uint32_t command
: 16; /* device command register */
uint32_t status : 16; /* device status */
uint32_t command : 16; /* device command register */
#else
uint32_t command
: 16; /* device command register */
uint32_t status : 16; /* device status */
uint32_t command : 16; /* device command register */
uint32_t status : 16; /* device status */
#endif
/* offset 08: */


@@ -103,6 +103,7 @@ static inline int spi_configure(struct device *dev,
struct spi_config *config, void *user_data)
{
struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
return api->configure(dev, config, user_data);
}
@@ -141,6 +142,7 @@ inline int spi_slave_select(struct device *dev, uint32_t slave)
static inline int spi_read(struct device *dev, uint8_t *buf, uint32_t len)
{
struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
return api->transceive(dev, NULL, 0, buf, len);
}
@@ -155,6 +157,7 @@ static inline int spi_read(struct device *dev, uint8_t *buf, uint32_t len)
static inline int spi_write(struct device *dev, uint8_t *buf, uint32_t len)
{
struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
return api->transceive(dev, buf, len, NULL, 0);
}
@@ -176,6 +179,7 @@ static inline int spi_transceive(struct device *dev,
uint8_t *rx_buf, uint32_t rx_buf_len)
{
struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
return api->transceive(dev, tx_buf, tx_buf_len, rx_buf, rx_buf_len);
}
@@ -188,6 +192,7 @@ static inline int spi_transceive(struct device *dev,
static inline int spi_suspend(struct device *dev)
{
struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
return api->suspend(dev);
}
@@ -200,6 +205,7 @@ static inline int spi_suspend(struct device *dev)
static inline int spi_resume(struct device *dev)
{
struct spi_driver_api *api = (struct spi_driver_api *)dev->driver_api;
return api->resume(dev);
}
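
The spi.h hunks above only re-space the inline wrappers, which dispatch straight through the device's driver_api table. A hedged usage sketch of that API; the header paths, the "SPI_0" binding name, and the zeroed spi_config are assumptions, not part of this commit:

#include <device.h>
#include <spi.h>

/* hypothetical caller; "SPI_0" is an assumed device name */
static int spi_usage_sketch(void)
{
	struct device *spi_dev = device_get_binding("SPI_0");
	struct spi_config cfg = { 0 };	/* fill in per board/driver */
	uint8_t tx[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t rx[4];

	if (spi_dev == NULL) {
		return -1;	/* binding not found */
	}

	spi_configure(spi_dev, &cfg, NULL);	/* dispatches to api->configure() */

	/* dispatches to api->transceive() */
	return spi_transceive(spi_dev, tx, sizeof(tx), rx, sizeof(rx));
}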


@@ -185,6 +185,7 @@ void _k_do_event_signal(kevent_t event)
void _k_event_signal(struct k_args *A)
{
kevent_t event = A->args.e1.event;
_k_do_event_signal(event);
A->Time.rcode = RC_OK;
}


@@ -72,6 +72,7 @@ static void workload_loop(void)
while (++_k_workload_i != _k_workload_n1) {
unsigned int s_iCountDummyProc = 0;
while (64 != s_iCountDummyProc++) { /* 64 == 2^6 */
x >>= y;
x <<= y;
@@ -413,6 +414,7 @@ int _k_kernel_idle(void)
/* record timestamp when idling begins */
extern uint64_t __idle_tsc;
__idle_tsc = _NanoTscRead();
#endif


@@ -139,8 +139,8 @@ static bool prepare_transfer(struct k_args *move,
if (move) {
/* { move != NULL, which means full data exchange } */
bool all_data_present = true;
move->Comm = _K_SVC_MOVEDATA_REQ;
/*
* transfer the data with the highest
@@ -872,6 +872,7 @@ int _task_mbox_data_block_get(struct k_msg *message,
*/
struct k_args A;
A.args.m1.mess = *message;
A.Comm = _K_SVC_MBOX_RECEIVE_DATA;
KERNEL_ENTRY(&A);


@@ -205,6 +205,7 @@ void _k_pipe_get_reply(struct k_args *ReqProc)
struct k_args *ReqOrig = ReqProc->Ctxt.args;
PIPE_REQUEST_STATUS status;
ReqOrig->Comm = _K_SVC_PIPE_GET_ACK;
/* determine return value */


@@ -56,6 +56,7 @@ int CalcFreeReaderSpace(struct k_args *pReaderList)
if (pReaderList) {
struct k_args *reader_ptr = pReaderList;
while (reader_ptr != NULL) {
size += (reader_ptr->args.pipe_xfer_req.total_size -
reader_ptr->args.pipe_xfer_req.xferred_size);
@@ -71,6 +72,7 @@ int CalcAvailWriterData(struct k_args *pWriterList)
if (pWriterList) {
struct k_args *writer_ptr = pWriterList;
while (writer_ptr != NULL) {
size += (writer_ptr->args.pipe_xfer_req.total_size -
writer_ptr->args.pipe_xfer_req.xferred_size);


@@ -99,6 +99,7 @@ void _k_task_monitor_read(struct k_args *A)
A->args.z4.nrec = k_monitor_nrec;
if (A->args.z4.rind < k_monitor_nrec) {
int i = k_monitor_wind - k_monitor_nrec + A->args.z4.rind;
if (i < 0) {
i += k_monitor_capacity;
}


@@ -36,6 +36,7 @@ void _sys_device_do_config_level(int level)
for (info = config_levels[level]; info < config_levels[level+1]; info++) {
struct device_config *device = info->config;
device->init(info);
}
}


@@ -115,6 +115,7 @@ void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)
fifo->stat++;
if (fifo->stat <= 0) {
struct tcs *tcs = _nano_wait_q_remove_no_check(&fifo->wait_q);
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int)data);
} else {
@@ -133,6 +134,7 @@ void nano_task_fifo_put(struct nano_fifo *fifo, void *data)
fifo->stat++;
if (fifo->stat <= 0) {
struct tcs *tcs = _nano_wait_q_remove_no_check(&fifo->wait_q);
_nano_timeout_abort(tcs);
fiberRtnValueSet(tcs, (unsigned int)data);
_Swap(imask);


@@ -71,6 +71,7 @@ int64_t nano_tick_get(void)
* _nano_ticks
*/
unsigned int imask = irq_lock();
tmp_nano_ticks = _nano_ticks;
irq_unlock(imask);
return tmp_nano_ticks;
@@ -116,6 +117,7 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
* _nano_ticks
*/
unsigned int imask = irq_lock();
saved = _nano_ticks;
irq_unlock(imask);
delta = saved - (*reftime);
@@ -171,6 +173,7 @@ static inline void handle_expired_nano_timers(int ticks)
while (_nano_timer_list && (!_nano_timer_list->ticks)) {
struct nano_timer *expired = _nano_timer_list;
struct nano_lifo *lifo = &expired->lifo;
_nano_timer_list = expired->link;
nano_isr_lifo_put(lifo, expired->userData);
}
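
The nano_tick_get() and _nano_tick_delta() hunks in this file lock interrupts so that the 64-bit _nano_ticks value is read atomically on 32-bit targets. A hedged sketch of the same pattern on a hypothetical counter; the header path is assumed:

#include <nanokernel.h>	/* header path assumed */

static volatile int64_t shared_ticks;	/* hypothetical 64-bit counter */

static int64_t read_shared_ticks(void)
{
	unsigned int imask = irq_lock();	/* keep the tick ISR out mid-read */
	int64_t snapshot = shared_ticks;	/* one 64-bit read is two 32-bit accesses */

	irq_unlock(imask);
	return snapshot;
}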


@@ -72,8 +72,11 @@ void _sys_profiler_context_switch(void)
{
extern tNANO _nanokernel;
uint32_t data[2];
extern void _sys_event_logger_put_non_preemptible(
struct event_logger *logger, uint16_t event_id, uint32_t *event_data,
struct event_logger *logger,
uint16_t event_id,
uint32_t *event_data,
uint8_t data_size);
/* if the profiler has not been initialized, we do nothing */
@@ -86,21 +89,25 @@ void _sys_profiler_context_switch(void)
data[1] = (uint32_t)_nanokernel.current;
/*
* The mechanism we use to log the profile events uses a sync semaphore
* to inform that there are available events to be collected. The
* context switch event can be triggered from a task. When we
* signal a semaphore from a task and a fiber is waiting for
* that semaphore, a context switch is generated immediately. Due to
* the fact that we register the context switch event while the context
* switch is being processed, a new context switch can be generated
* before the kernel finishes processing the current context switch. We
* need to prevent this because the kernel is not able to handle it.
* The _sem_give_non_preemptible function does not trigger a context
* switch when we signal the semaphore from any type of thread. Using
* _sys_event_logger_put_non_preemptible function, that internally uses
* _sem_give_non_preemptible function for signaling the sync semaphore,
* allow us registering the context switch event without triggering any
* new context switch during the process.
* The mechanism we use to log the profile events uses a sync
* semaphore to inform that there are available events to be
* collected. The context switch event can be triggered from a
* task. When we signal a semaphore from a task and a fiber is
* waiting for that semaphore, a context switch is generated
* immediately. Due to the fact that we register the context
* switch event while the context switch is being processed, a
* new context switch can be generated before the kernel
* finishes processing the current context switch. We
* need to prevent this because the kernel is not able to
* handle it.
*
* The _sem_give_non_preemptible function does not trigger a
* context switch when we signal the semaphore from any type of
* thread. Using _sys_event_logger_put_non_preemptible function,
* that internally uses _sem_give_non_preemptible function for
* signaling the sync semaphore, allow us registering the
* context switch event without triggering any new context
* switch during the process.
*/
_sys_event_logger_put_non_preemptible(&sys_profiler_logger,
PROFILER_CONTEXT_SWITCH_EVENT_ID, data, ARRAY_SIZE(data));
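
The rewrapped comment explains why the context-switch event has to be logged through the non-preemptible path. A hedged sketch contrasting the two paths; the logger, event ID, and the non-preemptible prototype come from this hunk, while the header paths and the sys_event_logger_put() call are assumptions about the surrounding event-logger API:

#include <misc/event_logger.h>	/* header paths assumed */
#include <misc/profiler.h>

extern struct event_logger sys_profiler_logger;
extern void _sys_event_logger_put_non_preemptible(
	struct event_logger *logger, uint16_t event_id, uint32_t *event_data,
	uint8_t data_size);

/* normal path: putting an event signals the sync semaphore, so a fiber
 * waiting on it may be scheduled immediately; safe from ordinary task code
 */
static void log_event_from_task(uint32_t *data, uint8_t size)
{
	sys_event_logger_put(&sys_profiler_logger,
			     PROFILER_CONTEXT_SWITCH_EVENT_ID, data, size);
}

/* context-switch path: the non-preemptible put signals the semaphore via
 * _sem_give_non_preemptible, so no new context switch is triggered while
 * the current one is still being processed
 */
static void log_event_from_switch(uint32_t *data, uint8_t size)
{
	_sys_event_logger_put_non_preemptible(&sys_profiler_logger,
					      PROFILER_CONTEXT_SWITCH_EVENT_ID,
					      data, size);
}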