all: Add 'U' suffix when using unsigned variables

Add a 'U' suffix to values when computing and comparing against
unsigned variables.

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>

parent caebf204c6
commit 24d71431e9

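Background (not part of the commit; a minimal illustrative sketch with hypothetical names): an unadorned integer constant such as 0 or 16 has type int in C, so using it with an unsigned variable forces an implicit signed/unsigned conversion. Appending the 'U' suffix makes the constant unsigned from the start, which is the pattern applied throughout the hunks below and the kind of thing MISRA-C-style checkers look for.

    #include <stdint.h>

    static uint32_t remaining = 16U;   /* unsigned variable */

    void consume(uint32_t n)
    {
            /* '0U' matches the signedness of 'n' and 'remaining', so no
             * implicit signed/unsigned conversion happens in the
             * comparison or the subtraction below.
             */
            if (n == 0U || n > remaining) {
                    return;
            }
            remaining -= n;
    }
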
@@ -91,7 +91,7 @@ static void dcache_flush_mlines(u32_t start_addr, u32_t size)
 u32_t end_addr;
 unsigned int key;

-if (!dcache_available() || (size == 0)) {
+if (!dcache_available() || (size == 0U)) {
 return;
 }

@@ -150,9 +150,9 @@ static void init_dcache_line_size(void)
 u32_t val;

 val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
-__ASSERT((val&0xff) != 0, "d-cache is not present");
+__ASSERT((val&0xff) != 0U, "d-cache is not present");
 val = ((val>>16) & 0xf) + 1;
-val *= 16;
+val *= 16U;
 sys_cache_line_size = (size_t) val;
 }
 #endif

@@ -77,7 +77,7 @@ void _Fault(NANO_ESF *esf)
 * stack check and mpu violation can come out together, then
 * parameter = 0x2 | [0x4 | 0x8 | 0x1]
 */
-if (vector == 6 && parameter & 0x2) {
+if (vector == 6U && parameter & 0x2) {
 z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
 return;
 }

@@ -159,7 +159,7 @@ static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)

 LOG_DBG("Region info: 0x%x 0x%x", base, size);

-if (region_attr == 0 || region_index < 0) {
+if (region_attr == 0U || region_index < 0) {
 return -EINVAL;
 }

@@ -320,7 +320,7 @@ static void _mpu_reset_dynamic_regions(void)
 _region_init(i, 0, 0, 0);
 }

-for (i = 0; i < dynamic_regions_num; i++) {
+for (i = 0U; i < dynamic_regions_num; i++) {
 _region_init(
 dyn_reg_info[i].index,
 dyn_reg_info[i].base,

@@ -389,7 +389,7 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 _mpu_reset_dynamic_regions();
 #if defined(CONFIG_MPU_STACK_GUARD)
 #if defined(CONFIG_USERSPACE)
-if ((thread->base.user_options & K_USER) != 0) {
+if ((thread->base.user_options & K_USER) != 0U) {
 /* the areas before and after the user stack of thread is
 * kernel only. These area can be used as stack guard.
 * -----------------------

@@ -448,7 +448,7 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 num_partitions = mem_domain->num_partitions;
 pparts = mem_domain->partitions;
 } else {
-num_partitions = 0;
+num_partitions = 0U;
 pparts = NULL;
 }

@@ -44,7 +44,7 @@ static ALWAYS_INLINE void _icache_setup(void)

 val = z_arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD);
 val &= 0xff;
-if (val != 0) { /* is i-cache present? */
+if (val != 0U) { /* is i-cache present? */
 /* configure i-cache */
 z_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, icache_config);
 }

@@ -52,11 +52,11 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
 * partition must align with size.
 */
 int partition_is_valid =
-((part->size & (part->size - 1)) == 0)
+((part->size & (part->size - 1)) == 0U)
 &&
 (part->size >= CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
 &&
-((part->start & (part->size - 1)) == 0);
+((part->start & (part->size - 1)) == 0U);

 return partition_is_valid;
 }

@@ -72,7 +72,7 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
 static inline u32_t _size_to_mpu_rasr_size(u32_t size)
 {
 /* The minimal supported region size is 32 bytes */
-if (size <= 32) {
+if (size <= 32U) {
 return REGION_32B;
 }
@@ -264,7 +264,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 int reg_index = start_reg_index;

 for (i = 0; i < regions_num; i++) {
-if (regions[i]->size == 0) {
+if (regions[i]->size == 0U) {
 continue;
 }
 /* Non-empty region. */

@@ -101,7 +101,7 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
 == part->size)
 &&
 ((part->start &
-(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0);
+(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0U);

 return partition_is_valid;
 }

@@ -330,7 +330,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 int reg_index = start_reg_index;

 for (i = 0; i < regions_num; i++) {
-if (regions[i]->size == 0) {
+if (regions[i]->size == 0U) {
 continue;
 }
 /* Non-empty region. */

@@ -493,7 +493,7 @@ static int _mpu_mark_areas_for_dynamic_regions(
 * which dynamic memory regions may be programmed at run-time.
 */
 for (int i = 0; i < dyn_region_areas_num; i++) {
-if (dyn_region_areas[i].size == 0) {
+if (dyn_region_areas[i].size == 0U) {
 continue;
 }
 /* Non-empty area */

@@ -54,14 +54,14 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
 * minimum MPU region size.
 */
 int partition_is_valid =
-(part->size != 0)
+(part->size != 0U)
 &&
 ((part->size &
 (~(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)))
 == part->size)
 &&
 ((part->start &
-(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0);
+(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1)) == 0U);

 return partition_is_valid;
 }

@@ -78,7 +78,7 @@ static void _region_init(const u32_t index,
 u32_t region_end = region_conf->end;
 u32_t region_attr = region_conf->attr.attr;

-if (index == 0) {
+if (index == 0U) {
 /* The MPU does not allow writes from the core to affect the
 * RGD0 start or end addresses nor the permissions associated
 * with the debugger; it can only write the permission fields

@@ -241,7 +241,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 int reg_index = start_reg_index;

 for (i = 0; i < regions_num; i++) {
-if (regions[i]->size == 0) {
+if (regions[i]->size == 0U) {
 continue;
 }
 /* Non-empty region. */

@@ -389,7 +389,7 @@ static int _BusFault(NANO_ESF *esf, int fromHardFault)

 if (sperr) {
 for (i = 0; i < SYSMPU_EAR_COUNT; i++, mask >>= 1) {
-if ((sperr & mask) == 0) {
+if ((sperr & mask) == 0U) {
 continue;
 }
 STORE_xFAR(edr, SYSMPU->SP[i].EDR);

@@ -796,7 +796,7 @@ void _Fault(NANO_ESF *esf, u32_t exc_return)
 /* Invalid EXC_RETURN value */
 goto _exit_fatal;
 }
-if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0) {
+if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
 /* Secure Firmware shall only handle Secure Exceptions.
 * This is a fatal error.
 */

@@ -29,7 +29,7 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
 unsigned int key;

 __asm__ volatile("mrs %0, PRIMASK;" : "=r" (key) : : "memory");
-__ASSERT(key == 0, "irq_offload called with interrupts locked\n");
+__ASSERT(key == 0U, "irq_offload called with interrupts locked\n");
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE && CONFIG_ASSERT */

 k_sched_lock();
@@ -58,7 +58,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 char *pStackMem = K_THREAD_STACK_BUFFER(stack);
 char *stackEnd;
 /* Offset between the top of stack and the high end of stack area. */
-u32_t top_of_stack_offset = 0;
+u32_t top_of_stack_offset = 0U;

 Z_ASSERT_VALID_PRIO(priority, pEntry);

@@ -63,7 +63,7 @@ static ALWAYS_INLINE bool _IsInIsr(void)
 /* On ARMv6-M there is no nested execution bit, so we check
 * exception 3, hard fault, to a detect a nested exception.
 */
-|| (vector == 3)
+|| (vector == 3U)
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 /* If not in thread mode, and if RETTOBASE bit in ICSR is 0,
 * then there are preempted active exceptions to execute.

@@ -80,7 +80,7 @@ u64_t __common_var_swap_end_time;

 void read_timer_start_of_swap(void)
 {
-if (__read_swap_end_time_value == 1) {
+if (__read_swap_end_time_value == 1U) {
 TIMING_INFO_PRE_READ();
 __start_swap_time = (u32_t) TIMING_INFO_OS_GET_TIME();
 }

@@ -88,7 +88,7 @@ void read_timer_start_of_swap(void)

 void read_timer_end_of_swap(void)
 {
-if (__read_swap_end_time_value == 1) {
+if (__read_swap_end_time_value == 1U) {
 TIMING_INFO_PRE_READ();
 __read_swap_end_time_value = 2U;
 __common_var_swap_end_time = (u64_t)TIMING_INFO_OS_GET_TIME();

@@ -177,7 +177,7 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
 exc_reg = _nios2_creg_read(NIOS2_CR_EXCEPTION);

 /* Bit 31 indicates potentially fatal ECC error */
-eccftl = (exc_reg & NIOS2_EXCEPTION_REG_ECCFTL_MASK) != 0;
+eccftl = (exc_reg & NIOS2_EXCEPTION_REG_ECCFTL_MASK) != 0U;

 /* Bits 2-6 contain the cause code */
 cause = (exc_reg & NIOS2_EXCEPTION_REG_CAUSE_MASK)

@@ -95,7 +95,7 @@ void k_cpu_atomic_idle(unsigned int key)
 "hlt\n\t");

 /* restore interrupt lockout state before returning to caller */
-if ((key & 0x200U) == 0) {
+if ((key & 0x200U) == 0U) {
 z_int_latency_start();
 __asm__ volatile("cli");
 }

@@ -47,7 +47,7 @@ static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
 /* We were servicing an interrupt */
 start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
 end = start + CONFIG_ISR_STACK_SIZE;
-} else if ((cs & 0x3U) != 0 ||
+} else if ((cs & 0x3U) != 0U ||
 (_current->base.user_options & K_USER) == 0) {
 /* Thread was in user mode, or is not a user mode thread.
 * The normal stack buffer is what we will check.

@@ -79,13 +79,13 @@ static void unwind_stack(u32_t base_ptr, u16_t cs)
 struct stack_frame *frame;
 int i;

-if (base_ptr == 0) {
+if (base_ptr == 0U) {
 printk("NULL base ptr\n");
 return;
 }

 for (i = 0; i < MAX_STACK_FRAMES; i++) {
-if (base_ptr % sizeof(base_ptr) != 0) {
+if (base_ptr % sizeof(base_ptr) != 0U) {
 printk("unaligned frame ptr\n");
 return;
 }

@@ -105,7 +105,7 @@ static void unwind_stack(u32_t base_ptr, u16_t cs)
 }
 #endif

-if (frame->ret_addr == 0) {
+if (frame->ret_addr == 0U) {
 break;
 }
 #ifdef CONFIG_X86_IAMCU

@@ -372,8 +372,8 @@ static void dump_page_fault(NANO_ESF *esf)
 printk("***** CPU Page Fault (error code 0x%08x)\n", err);

 printk("%s thread %s address 0x%08x\n",
-(err & US) != 0 ? "User" : "Supervisor",
-(err & ID) != 0 ? "executed" : ((err & WR) != 0 ? "wrote" :
+(err & US) != 0U ? "User" : "Supervisor",
+(err & ID) != 0U ? "executed" : ((err & WR) != 0U ? "wrote" :
 "read"), cr2);

 #ifdef CONFIG_X86_MMU
@@ -507,7 +507,7 @@ static FUNC_NORETURN __used void _df_handler_top(void)
 _main_tss.ss = DATA_SEG;
 _main_tss.eip = (u32_t)_df_handler_bottom;
 _main_tss.cr3 = (u32_t)&z_x86_kernel_pdpt;
-_main_tss.eflags = 0;
+_main_tss.eflags = 0U;

 /* NT bit is set in EFLAGS so we will task switch back to _main_tss
 * and run _df_handler_bottom

@@ -198,7 +198,7 @@ static unsigned int priority_to_free_vector(unsigned int requested_priority)
 z_interrupt_vectors_allocated[entry];
 fsb = find_lsb_set(search_set);

-__ASSERT(fsb != 0, "No remaning vectors for priority level %d",
+__ASSERT(fsb != 0U, "No remaning vectors for priority level %d",
 requested_priority);

 /*

@@ -314,7 +314,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 #else
 vector = priority_to_free_vector(priority);
 /* 0 indicates not used, vectors for interrupts start at 32 */
-__ASSERT(_irq_to_interrupt_vector[irq] == 0,
+__ASSERT(_irq_to_interrupt_vector[irq] == 0U,
 "IRQ %d already configured", irq);
 _irq_to_interrupt_vector[irq] = vector;
 #endif

@@ -48,16 +48,16 @@ static int spec_ctrl_init(struct device *dev)
 u32_t cpuid7 = cpuid_extended_features();

 #ifdef CONFIG_DISABLE_SSBD
-if ((cpuid7 & CPUID_SPEC_CTRL_SSBD) != 0) {
+if ((cpuid7 & CPUID_SPEC_CTRL_SSBD) != 0U) {
 enable_bits |= SPEC_CTRL_SSBD;
 }
 #endif
 #ifdef CONFIG_ENABLE_EXTENDED_IBRS
-if ((cpuid7 & CPUID_SPEC_CTRL_IBRS) != 0) {
+if ((cpuid7 & CPUID_SPEC_CTRL_IBRS) != 0U) {
 enable_bits |= SPEC_CTRL_IBRS;
 }
 #endif
-if (enable_bits != 0) {
+if (enable_bits != 0U) {
 u64_t cur = _x86_msr_read(IA32_SPEC_CTRL_MSR);

 _x86_msr_write(IA32_SPEC_CTRL_MSR,

@@ -72,7 +72,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 _new_thread_init(thread, stack_buf, stack_size, priority, options);

 #if CONFIG_X86_USERSPACE
-if ((options & K_USER) == 0) {
+if ((options & K_USER) == 0U) {
 /* Running in kernel mode, kernel stack region is also a guard
 * page */
 z_x86_mmu_set_flags(&z_x86_kernel_pdpt,

@@ -100,7 +100,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 /* initial EFLAGS; only modify IF and IOPL bits */
 initial_frame->eflags = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;
 #ifdef CONFIG_X86_USERSPACE
-if ((options & K_USER) != 0) {
+if ((options & K_USER) != 0U) {
 #ifdef _THREAD_WRAPPER_REQUIRED
 initial_frame->edi = (u32_t)z_arch_user_mode_enter;
 initial_frame->thread_entry = _x86_thread_entry_wrapper;

@@ -291,7 +291,7 @@ static inline void _x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,

 /* Get the partition info */
 partition = &mem_domain->partitions[partition_index];
-if (partition->size == 0) {
+if (partition->size == 0U) {
 continue;
 }
 partitions_count++;

@@ -150,7 +150,7 @@ void _cpu_start(int cpu)
 * "timer" API
 */
 xuk_set_isr(INT_APIC_LVT_TIMER, 10, handler_timer, 0);
-_apic.INIT_COUNT = 5000000;
+_apic.INIT_COUNT = 5000000U;
 test_timers();

 if (cpu == 0) {

@@ -73,7 +73,7 @@ static inline void *alloc_page(int clear)
 {
 int *p = (int *)(long)_shared.next_page;

-_shared.next_page += 4096;
+_shared.next_page += 4096U;

 for (int i = 0; clear && i < 1024; i++) {
 p[i] = 0;

@@ -145,7 +145,7 @@ void _cpu_start(int cpu)

 /* Set up the timer ISR, but ensure the timer is disabled */
 xuk_set_isr(INT_APIC_LVT_TIMER, 13, x86_apic_timer_isr, 0);
-_apic.INIT_COUNT = 0;
+_apic.INIT_COUNT = 0U;

 xuk_set_isr(XUK_INT_RAW_VECTOR(SCHED_IPI_VECTOR),
 -1, sched_ipi_handler, 0);
@@ -189,7 +189,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 u32_t flags)
 {
 ARG_UNUSED(flags);
-__ASSERT(priority >= 2 && priority <= 15,
+__ASSERT(priority >= 2U && priority <= 15U,
 "APIC interrupt priority must be 2-15");

 xuk_set_isr(irq, priority, (void *)routine, parameter);

@@ -106,7 +106,7 @@ void *init_page_tables(void)

 /* Each PDE filled with 2M supervisor pages */
 for (int i = 0; i < 512; i++) {
-if (!(gb == 0 && i == 0)) {
+if (!(gb == 0U && i == 0)) {
 pde[i].addr = (gb << 30) | (i << 21);
 pde[i].present = 1;
 pde[i].writable = 1;

@@ -481,7 +481,7 @@ static void smp_init(void)
 * the page we allocated.
 */
 _shared.smpinit_lock = 0;
-_shared.smpinit_stack = 0;
+_shared.smpinit_stack = 0U;
 _shared.num_active_cpus = 1;

 printf("Sending SIPI IPI\n");

@@ -97,7 +97,7 @@ int hw_irq_ctrl_get_highest_prio_irq(void)
 int winner = -1;
 int winner_prio = 256;

-while (irq_status != 0) {
+while (irq_status != 0U) {
 int irq_nbr = find_lsb_set(irq_status) - 1;

 irq_status &= ~((u64_t) 1 << irq_nbr);

@@ -123,7 +123,7 @@ u32_t hw_irq_ctrl_change_lock(u32_t new_lock)
 irqs_locked = new_lock;

 if ((previous_lock == true) && (new_lock == false)) {
-if (irq_status != 0) {
+if (irq_status != 0U) {
 posix_irq_handler_im_from_sw();
 }
 }

@@ -28,7 +28,7 @@ u64_t native_rtc_gettime_us(int clock_type)
 u64_t sec;

 hwtimer_get_pseudohost_rtc_time(&nsec, &sec);
-return sec * 1000000UL + nsec / 1000;
+return sec * 1000000UL + nsec / 1000U;
 }

 posix_print_error_and_exit("Unknown clock source %i\n",

@@ -45,7 +45,7 @@ void native_rtc_gettime(int clock_type, u32_t *nsec, u64_t *sec)
 {
 if (clock_type == RTC_CLOCK_BOOT || clock_type == RTC_CLOCK_REALTIME) {
 u64_t us = native_rtc_gettime_us(clock_type);
-*nsec = (us % 1000000UL) * 1000;
+*nsec = (us % 1000000UL) * 1000U;
 *sec = us / 1000000UL;
 } else { /* RTC_CLOCK_PSEUDOHOSTREALTIME */
 hwtimer_get_pseudohost_rtc_time(nsec, sec);

@@ -50,9 +50,9 @@ static char *us_time_to_str(char *dest, u64_t time)
 unsigned int second;
 unsigned int us;

-hour = (time / 3600 / 1000000) % 24;
-minute = (time / 60 / 1000000) % 60;
-second = (time / 1000000) % 60;
+hour = (time / 3600U / 1000000U) % 24;
+minute = (time / 60U / 1000000U) % 60;
+second = (time / 1000000U) % 60;
 us = time % 1000000;

 sprintf(dest, "%02u:%02u:%02u.%06u", hour, minute, second, us);

@@ -202,7 +202,7 @@ static void hwtimer_tick_timer_reached(void)
 us_time_to_str(rs, real_time - boot_time);
 printf("tick @%5llims: diff = expected_rt - real_time = "
 "%5lli = %s - %s\n",
-hw_timer_tick_timer/1000, diff, es, rs);
+hw_timer_tick_timer/1000U, diff, es, rs);
 #endif

 if (diff > 0) { /* we need to slow down */

@@ -40,7 +40,7 @@ uint8_t inner_main_clean_up(int exit_code)

 u8_t bst_result = bst_delete();

-if (bst_result != 0) {
+if (bst_result != 0U) {
 bs_trace_raw_time(2, "main: The TESTCASE FAILED with return "
 "code %u\n", bst_result);
 }
@@ -187,7 +187,7 @@ static inline void adc_context_start_read(struct adc_context *ctx,
 if (ctx->sequence->options) {
 ctx->sampling_index = 0U;

-if (ctx->sequence->options->interval_us != 0) {
+if (ctx->sequence->options->interval_us != 0U) {
 atomic_set(&ctx->sampling_requested, 0);
 adc_context_enable_timer(ctx);
 return;

@@ -244,7 +244,7 @@ static inline void adc_context_on_sampling_done(struct adc_context *ctx,
 * a zero interval or if the timer expired again while
 * the current sampling was in progress.
 */
-if (ctx->sequence->options->interval_us == 0) {
+if (ctx->sequence->options->interval_us == 0U) {
 adc_context_start_sampling(ctx);
 } else if (atomic_dec(&ctx->sampling_requested) > 1) {
 adc_context_start_sampling(ctx);

@@ -253,7 +253,7 @@ static inline void adc_context_on_sampling_done(struct adc_context *ctx,
 return;
 }

-if (ctx->sequence->options->interval_us != 0) {
+if (ctx->sequence->options->interval_us != 0U) {
 adc_context_disable_timer(ctx);
 }
 }

@@ -248,10 +248,10 @@ static int adc_quark_d2000_read_request(struct device *dev,
 case 8:
 case 10:
 case 12:
-info->resolution = (seq_tbl->resolution / 2) - 3;
+info->resolution = (seq_tbl->resolution / 2U) - 3;

 /* sampling window is (resolution + 2) cycles */
-info->sample_window = seq_tbl->resolution + 2;
+info->sample_window = seq_tbl->resolution + 2U;
 break;
 default:
 return -EINVAL;

@@ -267,7 +267,7 @@ static int adc_quark_d2000_read_request(struct device *dev,
 * System clock is 32MHz, which means 1us == 32 cycles
 * if divider is 1.
 */
-interval = seq_tbl->options->interval_us * 32 /
+interval = seq_tbl->options->interval_us * 32U /
 CONFIG_ADC_INTEL_QUARK_D2000_CLOCK_RATIO;

 if (interval < info->sample_window) {

@@ -94,7 +94,7 @@ static inline void wait_slv0_bit_set(u32_t bit_mask)

 do {
 reg_value = sys_in32(PERIPH_ADDR_BASE_CREG_SLV0);
-} while ((reg_value & bit_mask) == 0);
+} while ((reg_value & bit_mask) == 0U);
 }

 static void set_power_mode_inner(u32_t mode)

@@ -182,7 +182,7 @@ static void dummy_conversion(struct device *dev)
 /* Wait for data available */
 do {
 reg_value = sys_in32(adc_base + ADC_INTSTAT);
-} while ((reg_value & ADC_INTSTAT_DATA_A) == 0);
+} while ((reg_value & ADC_INTSTAT_DATA_A) == 0U);

 /* Flush FIFO */
 reg_value = sys_in32(adc_base + ADC_SET);

@@ -364,7 +364,7 @@ static int adc_quark_se_ss_read_request(struct device *dev,
 * System clock is 32MHz, which means 1us == 32 cycles
 * if divider is 1.
 */
-interval = seq_tbl->options->interval_us * 32 /
+interval = seq_tbl->options->interval_us * 32U /
 CONFIG_ADC_INTEL_QUARK_SE_C1000_SS_CLOCK_RATIO;

 if (interval < (seq_tbl->resolution + 2)) {

@@ -152,7 +152,7 @@ static int start_read(struct device *dev, const struct adc_sequence *sequence)
 return -EINVAL;
 }

-if (sequence->oversampling != 0) {
+if (sequence->oversampling != 0U) {
 LOG_ERR("Oversampling is not supported");
 return -EINVAL;
 }

@@ -272,7 +272,7 @@ static int start_read(struct device *dev, const struct adc_sequence *sequence)
 /* Signal an error if a selected channel has not been
 * configured yet.
 */
-if (m_data.positive_inputs[channel_id] == 0) {
+if (m_data.positive_inputs[channel_id] == 0U) {
 LOG_ERR("Channel %u not configured",
 channel_id);
 return -EINVAL;
@@ -287,7 +287,7 @@ static int start_read(struct device *dev, const struct adc_sequence *sequence)
 * possible), the burst mode have to be deactivated.
 */
 nrf_saadc_burst_set(channel_id,
-(sequence->oversampling != 0 ?
+(sequence->oversampling != 0U ?
 NRF_SAADC_BURST_ENABLED :
 NRF_SAADC_BURST_DISABLED));
 nrf_saadc_channel_pos_input_set(

@@ -75,17 +75,17 @@ static int adc_sam_channel_setup(struct device *dev,
 u8_t channel_id = channel_cfg->channel_id;

 /* Clear the gain bits for the channel. */
-afec->AFEC_CGR &= ~(3 << channel_id * 2);
+afec->AFEC_CGR &= ~(3 << channel_id * 2U);

 switch (channel_cfg->gain) {
 case ADC_GAIN_1:
 /* A value of 0 in this register is a gain of 1. */
 break;
 case ADC_GAIN_1_2:
-afec->AFEC_CGR |= (1 << (channel_id * 2));
+afec->AFEC_CGR |= (1 << (channel_id * 2U));
 break;
 case ADC_GAIN_1_4:
-afec->AFEC_CGR |= (2 << (channel_id * 2));
+afec->AFEC_CGR |= (2 << (channel_id * 2U));
 break;
 default:
 LOG_ERR("Selected ADC gain is not valid");

@@ -192,18 +192,18 @@ static int start_read(struct device *dev, const struct adc_sequence *sequence)
 /* Signal an error if the channel selection is invalid (no channels or
 * a non-existing one is selected).
 */
-if (channels == 0 ||
+if (channels == 0U ||
 (channels & (~0UL << NUM_CHANNELS))) {
 LOG_ERR("Invalid selection of channels");
 return -EINVAL;
 }

-if (sequence->oversampling != 0) {
+if (sequence->oversampling != 0U) {
 LOG_ERR("Oversampling is not supported");
 return -EINVAL;
 }

-if (sequence->resolution != 12) {
+if (sequence->resolution != 12U) {
 /* TODO JKW: Support the Enhanced Resolution Mode 50.6.3 page
 * 1544.
 */

@@ -238,7 +238,7 @@ static void find_modes(struct decim_modes *modes,
 /* The FIFO is not requested if sample rate is set to zero. Just
 * return in such case with num_of_modes as zero.
 */
-if (fs == 0) {
+if (fs == 0U) {
 return;
 }

@@ -677,7 +677,7 @@ static int source_ipm_helper(struct pdm_chan_cfg *config, u32_t *source_mask,
 continue;
 }

-if ((*controller_mask & BIT(pdm_ix)) == 0) {
+if ((*controller_mask & BIT(pdm_ix)) == 0U) {
 *controller_mask |= BIT(pdm_ix);
 *source_mask |= pdm_ix << (ipm << 2);
 ipm++;

@@ -696,7 +696,7 @@ static int source_ipm_helper(struct pdm_chan_cfg *config, u32_t *source_mask,
 * if R channel mic was requested first
 * set the controller to swap the channels
 */
-if ((pdm_lr_mask & BIT(PDM_CHAN_LEFT + (pdm_ix << 1))) == 0) {
+if ((pdm_lr_mask & BIT(PDM_CHAN_LEFT + (pdm_ix << 1))) == 0U) {
 *swap_mask |= BIT(pdm_ix);
 }
 }

@@ -749,9 +749,9 @@ static int configure_registers(struct device *dev,
 u32_t source_mask;

 /* OUTCONTROL0 and OUTCONTROL1 */
-of0 = (config->streams[0].pcm_width == 32) ? 2 : 0;
+of0 = (config->streams[0].pcm_width == 32U) ? 2 : 0;
 if (config->channel.req_num_streams > 1) {
-of1 = (config->streams[1].pcm_width == 32) ? 2 : 0;
+of1 = (config->streams[1].pcm_width == 32U) ? 2 : 0;
 } else {
 of1 = 0;
 }

@@ -792,7 +792,7 @@ static int configure_registers(struct device *dev,
 * for starting correct parts of the HW.
 */
 for (i = 0; i < DMIC_HW_CONTROLLERS; i++) {
-if ((controller_mask & BIT(i)) == 0) {
+if ((controller_mask & BIT(i)) == 0U) {
 /* controller is not enabled */
 continue;
 }

@@ -802,7 +802,7 @@ static int configure_registers(struct device *dev,
 BIT(PDM_CHAN_RIGHT)) << (i << 1);
 } else {
 dmic_private.mic_en_mask |=
-((swap_mask & BIT(i)) == 0) ?
+((swap_mask & BIT(i)) == 0U) ?
 BIT(PDM_CHAN_LEFT) << (i << 1) :
 BIT(PDM_CHAN_RIGHT) << (i << 1);
 }

@@ -1014,7 +1014,7 @@ static int dmic_set_config(struct device *dev, struct dmic_cfg *config)
 LOG_DBG("num_chan %u", config->channel.req_num_chan);
 LOG_DBG("req_num_streams %u", config->channel.req_num_streams);

-if (config->channel.req_num_streams == 0) {
+if (config->channel.req_num_streams == 0U) {
 LOG_ERR("req_num_streams is 0");
 return -EINVAL;
 }
@@ -1281,7 +1281,7 @@ static int dmic_trigger_device(struct device *dev, enum dmic_trigger cmd)

 static inline u8_t dmic_parse_clk_skew_map(u32_t skew_map, u8_t pdm)
 {
-return (u8_t)((skew_map >> ((pdm & BIT_MASK(3)) * 4)) & BIT_MASK(4));
+return (u8_t)((skew_map >> ((pdm & BIT_MASK(3)) * 4U)) & BIT_MASK(4));
 }

 static int dmic_initialize_device(struct device *dev)

@@ -1391,7 +1391,7 @@ int dmic_configure_dma(struct pcm_stream_cfg *config, u8_t num_streams)

 dma_block.source_address = (u32_t)NULL;
 dma_block.dest_address = (u32_t)NULL;
-dma_block.block_size = 0;
+dma_block.block_size = 0U;
 dma_block.next_block = NULL;

 ret = dma_config(dmic_private.dma_dev, channel, &dma_cfg);

@@ -112,7 +112,7 @@ int mpxxdtyy_i2s_configure(struct device *dev, struct dmic_cfg *cfg)
 }

 factor = sw_filter_lib_init(dev, cfg);
-if (factor == 0) {
+if (factor == 0U) {
 return -EINVAL;
 }

@@ -18,7 +18,7 @@ u16_t sw_filter_lib_init(struct device *dev, struct dmic_cfg *cfg)
 u32_t audio_freq = cfg->streams->pcm_rate;

 /* calculate oversampling factor based on pdm clock */
-for (factor = 64; factor <= 128; factor += 64) {
+for (factor = 64U; factor <= 128U; factor += 64U) {
 u32_t pdm_bit_clk = (audio_freq * factor *
 cfg->channel.req_num_chan);

@@ -28,12 +28,12 @@ u16_t sw_filter_lib_init(struct device *dev, struct dmic_cfg *cfg)
 }
 }

-if (factor != 64 && factor != 128) {
+if (factor != 64U && factor != 128U) {
 return 0;
 }

 /* init the filter lib */
-pdm_filter->LP_HZ = audio_freq / 2;
+pdm_filter->LP_HZ = audio_freq / 2U;
 pdm_filter->HP_HZ = 10;
 pdm_filter->Fs = audio_freq;
 pdm_filter->Out_MicChannels = 1;

@@ -340,10 +340,10 @@ static int codec_configure_clocks(struct device *dev,
 LOG_DBG("NDAC: %u MDAC: %u OSR: %u", ndac, mdac, osr);

 if (i2s->options & I2S_OPT_BIT_CLK_MASTER) {
-bclk_div = osr * mdac / (i2s->word_size * 2); /* stereo */
+bclk_div = osr * mdac / (i2s->word_size * 2U); /* stereo */
 if ((bclk_div * i2s->word_size * 2) != (osr * mdac)) {
 LOG_ERR("Unable to generate BCLK %u from MCLK %u",
-i2s->frame_clk_freq * i2s->word_size * 2,
+i2s->frame_clk_freq * i2s->word_size * 2U,
 cfg->mclk_freq);
 return -EINVAL;
 }

@@ -366,7 +366,7 @@ static int codec_configure_clocks(struct device *dev,
 }

 /* calculate MCLK divider to get ~1MHz */
-mclk_div = (cfg->mclk_freq + 1000000 - 1) / 1000000;
+mclk_div = (cfg->mclk_freq + 1000000 - 1) / 1000000U;
 /* setup timer clock to be MCLK divided */
 codec_write_reg(dev, TIMER_MCLK_DIV_ADDR,
 TIMER_MCLK_DIV_EN_EXT | TIMER_MCLK_DIV_VAL(mclk_div));

@@ -86,7 +86,7 @@ static inline void spi_dump_message(const u8_t *pre, u8_t *buf,
 for (i = 0U; i < size; i++) {
 c = buf[i];
 printk("%x ", c);
-if (c >= 31 && c <= 126) {
+if (c >= 31U && c <= 126U) {
 printk("[%c] ", c);
 } else {
 printk("[.] ");

@@ -322,7 +322,7 @@ static void bt_spi_rx_thread(void)
 kick_cs();
 ret = bt_spi_transceive(header_master, 5,
 header_slave, 5);
-} while ((((header_slave[STATUS_HEADER_TOREAD] == 0 ||
+} while ((((header_slave[STATUS_HEADER_TOREAD] == 0U ||
 header_slave[STATUS_HEADER_TOREAD] == 0xFF) &&
 !ret)) && exit_irq_high_loop());

@@ -332,7 +332,7 @@ static void bt_spi_rx_thread(void)
 do {
 ret = bt_spi_transceive(&txmsg, size,
 &rxmsg, size);
-} while (rxmsg[0] == 0 && ret == 0);
+} while (rxmsg[0] == 0U && ret == 0);
 }

 release_cs();
@@ -438,7 +438,7 @@ static int bt_spi_send(struct net_buf *buf)
 * sleeping or still in the initialisation stage (waking-up).
 */
 } while ((rxmsg[STATUS_HEADER_READY] != READY_NOW ||
-(rxmsg[1] | rxmsg[2] | rxmsg[3] | rxmsg[4]) == 0) && !ret);
+(rxmsg[1] | rxmsg[2] | rxmsg[3] | rxmsg[4]) == 0U) && !ret);

 k_sem_give(&sem_busy);

@@ -448,7 +448,7 @@ static int bt_spi_send(struct net_buf *buf)
 do {
 ret = bt_spi_transceive(buf->data, buf->len,
 rxmsg, buf->len);
-} while (rxmsg[0] == 0 && !ret);
+} while (rxmsg[0] == 0U && !ret);
 }

 release_cs();

@@ -227,7 +227,7 @@ int can_stm32_runtime_configure(struct device *dev, enum can_mode mode,
 }

 prescaler = clock_rate / (BIT_SEG_LENGTH(cfg) * bitrate);
-if (prescaler == 0 || prescaler > 1024) {
+if (prescaler == 0U || prescaler > 1024) {
 LOG_ERR("HAL_CAN_Init failed: prescaler > max (%d > 1024)",
 prescaler);
 return -EINVAL;

@@ -346,7 +346,7 @@ int can_stm32_send(struct device *dev, const struct zcan_frame *msg,
 "standard" : "extended"
 , msg->rtr == CAN_DATAFRAME ? "no" : "yes");

-__ASSERT(msg->dlc == 0 || msg->data != NULL, "Dataptr is null");
+__ASSERT(msg->dlc == 0U || msg->data != NULL, "Dataptr is null");
 __ASSERT(msg->dlc <= CAN_MAX_DLC, "DLC > 8");

 if (can->ESR & CAN_ESR_BOFF) {

@@ -192,7 +192,7 @@ static void insert_char(char *pos, char c, u8_t end)
 /* Echo back to console */
 uart_poll_out(uart_console_dev, c);

-if (end == 0) {
+if (end == 0U) {
 *pos = c;
 return;
 }

@@ -217,7 +217,7 @@ static void del_char(char *pos, u8_t end)
 {
 uart_poll_out(uart_console_dev, '\b');

-if (end == 0) {
+if (end == 0U) {
 uart_poll_out(uart_console_dev, ' ');
 uart_poll_out(uart_console_dev, '\b');
 return;

@@ -271,10 +271,10 @@ static void handle_ansi(u8_t byte, char *line)
 if (atomic_test_bit(&esc_state, ESC_ANSI_VAL)) {
 if (isdigit(byte)) {
 if (atomic_test_bit(&esc_state, ESC_ANSI_VAL_2)) {
-ansi_val_2 *= 10;
+ansi_val_2 *= 10U;
 ansi_val_2 += byte - '0';
 } else {
-ansi_val *= 10;
+ansi_val *= 10U;
 ansi_val += byte - '0';
 }
 return;

@@ -92,7 +92,7 @@ static int mcux_rtc_set_alarm(struct device *dev, u8_t chan_id,

 LOG_DBG("Current time is %d ticks", current);

-if (chan_id != 0) {
+if (chan_id != 0U) {
 LOG_ERR("Invalid channel id");
 return -EINVAL;
 }

@@ -123,7 +123,7 @@ static int mcux_rtc_cancel_alarm(struct device *dev, u8_t chan_id)
 {
 struct mcux_rtc_data *data = dev->driver_data;

-if (chan_id != 0) {
+if (chan_id != 0U) {
 LOG_ERR("Invalid channel id");
 return -EINVAL;
 }

@@ -223,7 +223,7 @@ int ataes132a_aes_ccm_decrypt(struct device *dev,
 return -EINVAL;
 }

-if (in_buf_len != 16 && in_buf_len != 32) {
+if (in_buf_len != 16U && in_buf_len != 32U) {
 LOG_ERR("ccm mode only accepts input blocks of 16"
 " and 32 bytes");
 return -EINVAL;
@@ -279,14 +279,14 @@ int ataes132a_aes_ccm_decrypt(struct device *dev,
 0x0, param_buffer, 16,
 param_buffer, &out_len);

-if (return_code != 0) {
+if (return_code != 0U) {
 LOG_ERR("nonce command ended with code %d",
 return_code);
 k_sem_give(&data->device_sem);
 return -EINVAL;
 }

-if (param_buffer[0] != 0) {
+if (param_buffer[0] != 0U) {
 LOG_ERR("nonce command failed with error"
 " code %d", param_buffer[0]);
 k_sem_give(&data->device_sem);

@@ -354,7 +354,7 @@ int ataes132a_aes_ccm_decrypt(struct device *dev,
 in_buf_len + 4, param_buffer,
 &out_len);

-if (return_code != 0) {
+if (return_code != 0U) {
 LOG_ERR("decrypt command ended with code %d", return_code);
 k_sem_give(&data->device_sem);
 return -EINVAL;

@@ -367,7 +367,7 @@ int ataes132a_aes_ccm_decrypt(struct device *dev,
 return -EINVAL;
 }

-if (param_buffer[0] != 0) {
+if (param_buffer[0] != 0U) {
 LOG_ERR("legacy command failed with error"
 " code %d", param_buffer[0]);
 k_sem_give(&data->device_sem);

@@ -470,14 +470,14 @@ int ataes132a_aes_ccm_encrypt(struct device *dev,
 0x0, param_buffer, 16,
 param_buffer, &out_len);

-if (return_code != 0) {
+if (return_code != 0U) {
 LOG_ERR("nonce command ended with code %d",
 return_code);
 k_sem_give(&data->device_sem);
 return -EINVAL;
 }

-if (param_buffer[0] != 0) {
+if (param_buffer[0] != 0U) {
 LOG_ERR("nonce command failed with error"
 " code %d", param_buffer[0]);
 k_sem_give(&data->device_sem);

@@ -526,7 +526,7 @@ int ataes132a_aes_ccm_encrypt(struct device *dev,
 buf_len + 2, param_buffer,
 &out_len);

-if (return_code != 0) {
+if (return_code != 0U) {
 LOG_ERR("encrypt command ended with code %d", return_code);
 k_sem_give(&data->device_sem);
 return -EINVAL;

@@ -539,7 +539,7 @@ int ataes132a_aes_ccm_encrypt(struct device *dev,
 return -EINVAL;
 }

-if (param_buffer[0] != 0) {
+if (param_buffer[0] != 0U) {
 LOG_ERR("encrypt command failed with error"
 " code %d", param_buffer[0]);
 k_sem_give(&data->device_sem);

@@ -549,7 +549,7 @@ int ataes132a_aes_ccm_encrypt(struct device *dev,
 if (aead_op->tag) {
 memcpy(aead_op->tag, param_buffer + 1, 16);
 }
-memcpy(aead_op->pkt->out_buf, param_buffer + 17, out_len - 17);
+memcpy(aead_op->pkt->out_buf, param_buffer + 17, out_len - 17U);

 if (mac_mode) {
 if (mac_mode->include_counter) {

@@ -560,7 +560,7 @@ int ataes132a_aes_ccm_encrypt(struct device *dev,
 ataes132a_send_command(dev, ATAES_INFO_OP, 0x0,
 param_buffer, 4,
 param_buffer, &out_len);
-if (param_buffer[0] != 0) {
+if (param_buffer[0] != 0U) {
 LOG_ERR("info command failed with error"
 " code %d", param_buffer[0]);
 k_sem_give(&data->device_sem);

@@ -639,19 +639,19 @@ int ataes132a_aes_ecb_block(struct device *dev,
 param_buffer, buf_len + 3,
 param_buffer, &out_len);

-if (return_code != 0) {
+if (return_code != 0U) {
 LOG_ERR("legacy command ended with code %d", return_code);
 k_sem_give(&data->device_sem);
 return -EINVAL;
 }

-if (out_len != 17) {
+if (out_len != 17U) {
 LOG_ERR("legacy command response has invalid"
 " size %d", out_len);
 k_sem_give(&data->device_sem);
 return -EINVAL;
 }
-if (param_buffer[0] != 0) {
+if (param_buffer[0] != 0U) {
 LOG_ERR("legacy command failed with error"
 " code %d", param_buffer[0]);
 k_sem_give(&data->device_sem);

@@ -703,7 +703,7 @@ static int do_ccm_encrypt_mac(struct cipher_ctx *ctx,
 aead_op->pkt->out_len = 32;
 }

-if (aead_op->ad != NULL || aead_op->ad_len != 0) {
+if (aead_op->ad != NULL || aead_op->ad_len != 0U) {
 LOG_ERR("Associated data is not supported.");
 return -EINVAL;
 }
@@ -748,7 +748,7 @@ static int do_ccm_decrypt_auth(struct cipher_ctx *ctx,

 aead_op->pkt->ctx = ctx;

-if (aead_op->ad != NULL || aead_op->ad_len != 0) {
+if (aead_op->ad != NULL || aead_op->ad_len != 0U) {
 LOG_ERR("Associated data is not supported.");
 return -EINVAL;
 }

@@ -806,12 +806,12 @@ static int ataes132a_session_setup(struct device *dev, struct cipher_ctx *ctx,
 return -EINVAL;
 }
 if (mode == CRYPTO_CIPHER_MODE_CCM &&
-ctx->mode_params.ccm_info.tag_len != 16) {
+ctx->mode_params.ccm_info.tag_len != 16U) {
 LOG_ERR("ATAES132A support 16 byte tag only.");
 return -EINVAL;
 }
 if (mode == CRYPTO_CIPHER_MODE_CCM &&
-ctx->mode_params.ccm_info.nonce_len != 12) {
+ctx->mode_params.ccm_info.nonce_len != 12U) {
 LOG_ERR("ATAES132A support 12 byte nonce only.");
 return -EINVAL;
 }

@@ -147,7 +147,7 @@ static inline int burst_write_i2c(struct device *dev, u16_t dev_addr,
 addr_buffer[1] = start_addr & 0xFF;
 addr_buffer[0] = start_addr >> 8;
 msg[0].buf = addr_buffer;
-msg[0].len = 2;
+msg[0].len = 2U;
 msg[0].flags = I2C_MSG_WRITE;

 msg[1].buf = buf;

@@ -169,7 +169,7 @@ static inline int burst_read_i2c(struct device *dev, u16_t dev_addr,
 addr_buffer[1] = start_addr & 0xFF;
 addr_buffer[0] = start_addr >> 8;
 msg[0].buf = addr_buffer;
-msg[0].len = 2;
+msg[0].len = 2U;
 msg[0].flags = I2C_MSG_WRITE;

 msg[1].buf = buf;

@@ -138,7 +138,7 @@ static int mtls_session_setup(struct device *dev, struct cipher_ctx *ctx,
 return -EINVAL;
 }

-if (ctx->keylen != 16) {
+if (ctx->keylen != 16U) {
 LOG_ERR("%u key size is not supported", ctx->keylen);
 return -EINVAL;
 }

@@ -154,7 +154,7 @@ static int mtls_session_setup(struct device *dev, struct cipher_ctx *ctx,
 mbedtls_ccm_init(mtls_ctx);

 ret = mbedtls_ccm_setkey(mtls_ctx, MBEDTLS_CIPHER_ID_AES,
-ctx->key.bit_stream, ctx->keylen * 8);
+ctx->key.bit_stream, ctx->keylen * 8U);
 if (ret) {
 LOG_ERR("Could not setup the key (%d)", ret);
 mtls_sessions[ctx_idx].in_use = false;

@@ -223,7 +223,7 @@ static int tc_session_setup(struct device *dev, struct cipher_ctx *ctx,
 ctx->ops.cbc_crypt_hndlr = do_cbc_encrypt;
 break;
 case CRYPTO_CIPHER_MODE_CTR:
-if (ctx->mode_params.ctr_info.ctr_len != 32) {
+if (ctx->mode_params.ctr_info.ctr_len != 32U) {
 LOG_ERR("Tinycrypt supports only 32 bit "
 "counter");
 return -EINVAL;

@@ -244,7 +244,7 @@ static int tc_session_setup(struct device *dev, struct cipher_ctx *ctx,
 break;
 case CRYPTO_CIPHER_MODE_CTR:
 /* Maybe validate CTR length */
-if (ctx->mode_params.ctr_info.ctr_len != 32) {
+if (ctx->mode_params.ctr_info.ctr_len != 32U) {
 LOG_ERR("Tinycrypt supports only 32 bit "
 "counter");
 return -EINVAL;

@@ -55,7 +55,7 @@ static int ili9340_init(struct device *dev)
 data->cs_ctrl.gpio_dev =
 device_get_binding(DT_ILITEK_ILI9340_0_CS_GPIO_CONTROLLER);
 data->cs_ctrl.gpio_pin = DT_ILITEK_ILI9340_0_CS_GPIO_PIN;
-data->cs_ctrl.delay = 0;
+data->cs_ctrl.delay = 0U;
 data->spi_config.cs = &(data->cs_ctrl);
 #else
 data->spi_config.cs = NULL;

@@ -229,8 +229,8 @@ static void ili9340_get_capabilities(const struct device *dev,
 struct display_capabilities *capabilities)
 {
 memset(capabilities, 0, sizeof(struct display_capabilities));
-capabilities->x_resolution = 320;
-capabilities->y_resolution = 240;
+capabilities->x_resolution = 320U;
+capabilities->y_resolution = 240U;
 capabilities->supported_pixel_formats = PIXEL_FORMAT_RGB_888;
 capabilities->current_pixel_format = PIXEL_FORMAT_RGB_888;
 capabilities->current_orientation = DISPLAY_ORIENTATION_NORMAL;
@@ -183,10 +183,10 @@ static int mcux_elcdif_init(struct device *dev)

 elcdif_rgb_mode_config_t rgb_mode = config->rgb_mode;

-data->pixel_bytes = config->bits_per_pixel / 8;
+data->pixel_bytes = config->bits_per_pixel / 8U;
 data->fb_bytes = data->pixel_bytes *
 rgb_mode.panelWidth * rgb_mode.panelHeight;
-data->write_idx = 1;
+data->write_idx = 1U;

 for (i = 0; i < ARRAY_SIZE(data->fb); i++) {
 if (k_mem_pool_alloc(&mcux_elcdif_pool, &data->fb[i],

@@ -89,8 +89,8 @@ static void sdl_display_write_rgb888(u8_t *disp_buf,
 __ASSERT((3 * desc->pitch * desc->height) <= desc->buf_size,
 "Input buffer to small");

-for (h_idx = 0; h_idx < desc->height; ++h_idx) {
-for (w_idx = 0; w_idx < desc->width; ++w_idx) {
+for (h_idx = 0U; h_idx < desc->height; ++h_idx) {
+for (w_idx = 0U; w_idx < desc->width; ++w_idx) {
 byte_ptr = (const u8_t *)buf +
 3 * ((h_idx * desc->pitch) + w_idx);
 pixel = *byte_ptr << 16;

@@ -116,22 +116,22 @@ static void sdl_display_write_mono(u8_t *disp_buf,

 __ASSERT((desc->pitch * desc->height) <= (8 * desc->buf_size),
 "Input buffer to small");
-__ASSERT((desc->height % 8) == 0,
+__ASSERT((desc->height % 8) == 0U,
 "Input buffer height not aligned per 8 pixels");

 if (one_is_black) {
-one_color = 0;
+one_color = 0U;
 } else {
 one_color = 0x00FFFFFF;
 }

-for (tile_idx = 0; tile_idx < desc->height/8; ++tile_idx) {
-for (w_idx = 0; w_idx < desc->width; ++w_idx) {
+for (tile_idx = 0U; tile_idx < desc->height/8U; ++tile_idx) {
+for (w_idx = 0U; w_idx < desc->width; ++w_idx) {
 byte_ptr = (const u8_t *)buf +
 ((tile_idx * desc->pitch) + w_idx);
 disp_buf_start = disp_buf;
-for (h_idx = 0; h_idx < 8; ++h_idx) {
-if ((*byte_ptr & BIT(7-h_idx)) != 0) {
+for (h_idx = 0U; h_idx < 8; ++h_idx) {
+if ((*byte_ptr & BIT(7-h_idx)) != 0U) {
 pixel = one_color;
 } else {
 pixel = (~one_color) & 0x00FFFFFF;

@@ -125,7 +125,7 @@ void glcd_cursor_pos_set(struct device *port, u8_t col, u8_t row)

 unsigned char data[2];

-if (row == 0) {
+if (row == 0U) {
 col |= 0x80;
 } else {
 col |= 0xC0;

@@ -308,7 +308,7 @@ static void show_row(struct k_timer *timer)
 update_pins(disp, disp->row[disp->cur]);
 disp->cur = (disp->cur + 1) % DISPLAY_ROWS;

-if (disp->cur == 0 && disp->expiry != K_FOREVER &&
+if (disp->cur == 0U && disp->expiry != K_FOREVER &&
 k_uptime_get() > disp->expiry) {
 if (disp->scroll) {
 update_scroll(disp);

@@ -228,7 +228,7 @@ int ssd1306_write(const struct device *dev, const u16_t x, const u16_t y,
 return -1;
 }

-if (buf == NULL || desc->buf_size == 0) {
+if (buf == NULL || desc->buf_size == 0U) {
 LOG_ERR("Display buffer is not available");
 return -1;
 }

@@ -238,7 +238,7 @@ int ssd1306_write(const struct device *dev, const u16_t x, const u16_t y,
 return -1;
 }

-if (x != 0 && y != 0) {
+if (x != 0U && y != 0U) {
 LOG_ERR("Unsupported origin");
 return -1;
 }

@@ -277,7 +277,7 @@ static int ssd1673_write(const struct device *dev, const u16_t x,
 return -EINVAL;
 }

-if (buf == NULL || desc->buf_size == 0) {
+if (buf == NULL || desc->buf_size == 0U) {
 LOG_ERR("Display buffer is not available");
 return -EINVAL;
 }

@@ -297,13 +297,13 @@ static int ssd1673_write(const struct device *dev, const u16_t x,
 return -EINVAL;
 }

-if ((desc->height % EPD_PANEL_NUMOF_ROWS_PER_PAGE) != 0) {
+if ((desc->height % EPD_PANEL_NUMOF_ROWS_PER_PAGE) != 0U) {
 LOG_ERR("Buffer height not multiple of %d",
 EPD_PANEL_NUMOF_ROWS_PER_PAGE);
 return -EINVAL;
 }

-if ((y % EPD_PANEL_NUMOF_ROWS_PER_PAGE) != 0) {
+if ((y % EPD_PANEL_NUMOF_ROWS_PER_PAGE) != 0U) {
 LOG_ERR("Y coordinate not multiple of %d",
 EPD_PANEL_NUMOF_ROWS_PER_PAGE);
 return -EINVAL;

@@ -461,7 +461,7 @@ static int ssd1673_clear_and_write_buffer(struct device *dev)
 memset(clear_page, 0xff, sizeof(clear_page));
 sbuf.buf = clear_page;
 sbuf.len = sizeof(clear_page);
-for (page = 0; page <= (SSD1673_PANEL_LAST_PAGE + 1); ++page) {
+for (page = 0U; page <= (SSD1673_PANEL_LAST_PAGE + 1); ++page) {
 err = spi_write(driver->spi_dev, &driver->spi_config, &buf_set);
 if (err < 0) {
 return err;
@@ -629,7 +629,7 @@ static int ssd1673_init(struct device *dev)
 }

 driver->cs_ctrl.gpio_pin = DT_SOLOMON_SSD1673FB_0_CS_GPIO_PIN;
-driver->cs_ctrl.delay = 0;
+driver->cs_ctrl.delay = 0U;
 driver->spi_config.cs = &driver->cs_ctrl;
 #endif

@@ -74,7 +74,7 @@ static int nios2_msgdma_config(struct device *dev, u32_t channel,
 u32_t control;

 /* Nios-II MSGDMA supports only one channel per DMA core */
-if (channel != 0) {
+if (channel != 0U) {
 LOG_ERR("invalid channel number");
 return -EINVAL;
 }

@@ -85,7 +85,7 @@ static int nios2_msgdma_config(struct device *dev, u32_t channel,
 return -EINVAL;
 }
 #else
-if (cfg->block_count != 1) {
+if (cfg->block_count != 1U) {
 LOG_ERR("invalid block count!!");
 return -EINVAL;
 }

@@ -156,7 +156,7 @@ static int nios2_msgdma_transfer_start(struct device *dev, u32_t channel)
 int status;

 /* Nios-II mSGDMA supports only one channel per DMA core */
-if (channel != 0) {
+if (channel != 0U) {
 LOG_ERR("Invalid channel number");
 return -EINVAL;
 }

@@ -136,7 +136,7 @@ static int dma_qmsi_chan_config(struct device *dev, u32_t channel,
 u32_t temp = 0U;
 int ret = 0;

-if (config->block_count != 1) {
+if (config->block_count != 1U) {
 return -ENOTSUP;
 }

@@ -191,13 +191,13 @@ static int sam_xdmac_config(struct device *dev, u32_t channel,
 __ASSERT_NO_MSG(cfg->source_data_size == cfg->dest_data_size);
 __ASSERT_NO_MSG(cfg->source_burst_length == cfg->dest_burst_length);

-if (cfg->source_data_size != 1 && cfg->source_data_size != 2 &&
-cfg->source_data_size != 4) {
+if (cfg->source_data_size != 1U && cfg->source_data_size != 2U &&
+cfg->source_data_size != 4U) {
 LOG_ERR("Invalid 'source_data_size' value");
 return -EINVAL;
 }

-if (cfg->block_count != 1) {
+if (cfg->block_count != 1U) {
 LOG_ERR("Only single block transfer is currently supported."
 " Please submit a patch.");
 return -EINVAL;

@@ -212,7 +212,7 @@ static int sam_xdmac_config(struct device *dev, u32_t channel,
 case MEMORY_TO_MEMORY:
 channel_cfg.cfg =
 XDMAC_CC_TYPE_MEM_TRAN
-| XDMAC_CC_MBSIZE(burst_size == 0 ? 0 : burst_size - 1)
+| XDMAC_CC_MBSIZE(burst_size == 0U ? 0 : burst_size - 1)
 | XDMAC_CC_SAM_INCREMENTED_AM
 | XDMAC_CC_DAM_INCREMENTED_AM;
 break;

@@ -210,13 +210,13 @@ static u32_t dma_stm32_irq_status(struct dma_stm32_device *ddata,
 irqs = dma_stm32_read(ddata, DMA_STM32_LISR);
 }

-return (irqs >> (((id & 2) << 3) | ((id & 1) * 6)));
+return (irqs >> (((id & 2) << 3) | ((id & 1) * 6U)));
 }

 static void dma_stm32_irq_clear(struct dma_stm32_device *ddata,
 u32_t id, u32_t irqs)
 {
-irqs = irqs << (((id & 2) << 3) | ((id & 1) * 6));
+irqs = irqs << (((id & 2) << 3) | ((id & 1) * 6U));

 if (id & 4) {
 dma_stm32_write(ddata, DMA_STM32_HIFCR, irqs);

@@ -235,7 +235,7 @@ static int entropy_nrf5_get_entropy(struct device *device, u8_t *buf, u16_t len)
 buf, len);
 k_sem_give(&entropy_nrf5_data.sem_lock);

-if (bytes == 0) {
+if (bytes == 0U) {
 /* Pool is empty: Sleep until next interrupt. */
 k_sem_take(&entropy_nrf5_data.sem_sync, K_FOREVER);
 continue;

@@ -256,7 +256,7 @@ static int entropy_nrf5_get_entropy_isr(struct device *dev, u8_t *buf, u16_t len
 /* Check if this API is called on correct driver instance. */
 __ASSERT_NO_MSG(&entropy_nrf5_data == DEV_DATA(dev));

-if (likely((flags & ENTROPY_BUSYWAIT) == 0)) {
+if (likely((flags & ENTROPY_BUSYWAIT) == 0U)) {
 return rng_pool_get((struct rng_pool *)(entropy_nrf5_data.isr),
 buf, len);
 }
@@ -196,7 +196,7 @@ static void eth_dw_isr(struct device *dev)
 * by the shared IRQ driver. So check here if the interrupt
 * is coming from the GPIO controller (or somewhere else).
 */
-if ((int_status & STATUS_RX_INT) == 0) {
+if ((int_status & STATUS_RX_INT) == 0U) {
 return;
 }
 #endif

@@ -250,11 +250,11 @@ static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
 "Incorrect length of RX data buffer");
 /* Give ownership to GMAC and remove the wrap bit */
 rx_desc_list->buf[i].w0 = (u32_t)rx_buf_addr & GMAC_RXW0_ADDR;
-rx_desc_list->buf[i].w1 = 0;
+rx_desc_list->buf[i].w1 = 0U;
 }

 /* Set the wrap bit on the last descriptor */
-rx_desc_list->buf[rx_desc_list->len - 1].w0 |= GMAC_RXW0_WRAP;
+rx_desc_list->buf[rx_desc_list->len - 1U].w0 |= GMAC_RXW0_WRAP;

 return 0;
 }

@@ -270,12 +270,12 @@ static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue)
 tx_desc_list->tail = 0U;

 for (int i = 0; i < tx_desc_list->len; i++) {
-tx_desc_list->buf[i].w0 = 0;
+tx_desc_list->buf[i].w0 = 0U;
 tx_desc_list->buf[i].w1 = GMAC_TXW1_USED;
 }

 /* Set the wrap bit on the last descriptor */
-tx_desc_list->buf[tx_desc_list->len - 1].w1 |= GMAC_TXW1_WRAP;
+tx_desc_list->buf[tx_desc_list->len - 1U].w1 |= GMAC_TXW1_WRAP;

 #if GMAC_MULTIPLE_TX_PACKETS == 1
 /* Reset TX frame list */

@@ -608,7 +608,7 @@ static void rx_error_handler(Gmac *gmac, struct gmac_queue *queue)
 queue->rx_desc_list.tail = 0U;

 for (int i = 0; i < queue->rx_desc_list.len; i++) {
-queue->rx_desc_list.buf[i].w1 = 0;
+queue->rx_desc_list.buf[i].w1 = 0U;
 queue->rx_desc_list.buf[i].w0 &= ~GMAC_RXW0_OWNERSHIP;
 }

@@ -633,17 +633,17 @@ static int get_mck_clock_divisor(u32_t mck)
 {
 u32_t mck_divisor;

-if (mck <= 20000000) {
+if (mck <= 20000000U) {
 mck_divisor = GMAC_NCFGR_CLK_MCK_8;
-} else if (mck <= 40000000) {
+} else if (mck <= 40000000U) {
 mck_divisor = GMAC_NCFGR_CLK_MCK_16;
-} else if (mck <= 80000000) {
+} else if (mck <= 80000000U) {
 mck_divisor = GMAC_NCFGR_CLK_MCK_32;
-} else if (mck <= 120000000) {
+} else if (mck <= 120000000U) {
 mck_divisor = GMAC_NCFGR_CLK_MCK_48;
-} else if (mck <= 160000000) {
+} else if (mck <= 160000000U) {
 mck_divisor = GMAC_NCFGR_CLK_MCK_64;
-} else if (mck <= 240000000) {
+} else if (mck <= 240000000U) {
 mck_divisor = GMAC_NCFGR_CLK_MCK_96;
 } else {
 LOG_ERR("No valid MDC clock");

@@ -754,7 +754,7 @@ static int eth_sam_gmac_get_qav_idle_slope(Gmac *gmac, int queue_id,
 }

 /* Convert to bps as expected by upper layer */
-*idle_slope *= 8;
+*idle_slope *= 8U;

 return 0;
 }

@@ -772,7 +772,7 @@ static int eth_sam_gmac_get_qav_delta_bandwidth(Gmac *gmac, int queue_id,
 }

 /* Calculate in Bps */
-idle_slope /= 8;
+idle_slope /= 8U;

 /* Get bandwidth and convert to bps */
 bandwidth = eth_sam_gmac_get_bandwidth(gmac);

@@ -781,7 +781,7 @@ static int eth_sam_gmac_get_qav_delta_bandwidth(Gmac *gmac, int queue_id,
 * divide bandwidth - these numbers are so large that it should not
 * influence the outcome and saves us from employing larger data types.
 */
-*delta_bandwidth = idle_slope / (bandwidth / 100);
+*delta_bandwidth = idle_slope / (bandwidth / 100U);

 return 0;
 }

@@ -799,7 +799,7 @@ static int eth_sam_gmac_setup_qav_delta_bandwidth(Gmac *gmac, int queue_id,

 bandwidth = eth_sam_gmac_get_bandwidth(gmac);

-idle_slope = (bandwidth * queue_share) / 100;
+idle_slope = (bandwidth * queue_share) / 100U;

 return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id, idle_slope);
 }

@@ -1052,16 +1052,16 @@ static int priority_queue_init_as_idle(Gmac *gmac, struct gmac_queue *queue)
 "RX descriptors have to be word aligned");
 __ASSERT(!((u32_t)tx_desc_list->buf & ~GMAC_TBQB_ADDR_Msk),
 "TX descriptors have to be word aligned");
-__ASSERT((rx_desc_list->len == 1) && (tx_desc_list->len == 1),
+__ASSERT((rx_desc_list->len == 1U) && (tx_desc_list->len == 1U),
 "Priority queues are currently not supported, descriptor "
 "list has to have a single entry");

 /* Setup RX descriptor lists */
 /* Take ownership from GMAC and set the wrap bit */
 rx_desc_list->buf[0].w0 = GMAC_RXW0_WRAP;
-rx_desc_list->buf[0].w1 = 0;
+rx_desc_list->buf[0].w1 = 0U;
 /* Setup TX descriptor lists */
-tx_desc_list->buf[0].w0 = 0;
+tx_desc_list->buf[0].w0 = 0U;
 /* Take ownership from GMAC and set the wrap bit */
 tx_desc_list->buf[0].w1 = GMAC_TXW1_USED | GMAC_TXW1_WRAP;
@ -1179,13 +1179,13 @@ static struct net_pkt *frame_get(struct gmac_queue *queue)
|
|||
}
|
||||
|
||||
/* Update buffer descriptor status word */
|
||||
rx_desc->w1 = 0;
|
||||
rx_desc->w1 = 0U;
|
||||
/* Guarantee that status word is written before the address
|
||||
* word to avoid race condition.
|
||||
*/
|
||||
__DMB(); /* data memory barrier */
|
||||
/* Update buffer descriptor address word */
|
||||
wrap = (tail == rx_desc_list->len-1 ? GMAC_RXW0_WRAP : 0);
|
||||
wrap = (tail == rx_desc_list->len-1U ? GMAC_RXW0_WRAP : 0);
|
||||
rx_desc->w0 = ((u32_t)frag->data & GMAC_RXW0_ADDR) | wrap;
|
||||
|
||||
MODULO_INC(tail, rx_desc_list->len);
|
||||
|
@ -1379,7 +1379,7 @@ static int eth_tx(struct device *dev, struct net_pkt *pkt)
|
|||
*/
|
||||
tx_desc->w1 = (frag_len & GMAC_TXW1_LEN)
|
||||
| (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0)
|
||||
| (tx_desc_list->head == tx_desc_list->len - 1
|
||||
| (tx_desc_list->head == tx_desc_list->len - 1U
|
||||
? GMAC_TXW1_WRAP : 0)
|
||||
| (tx_desc == tx_first_desc ? GMAC_TXW1_USED : 0);
|
||||
|
||||
|
@ -1814,7 +1814,7 @@ static int eth_sam_gmac_set_qav_param(struct device *dev,
|
|||
idle_slope = config->qav_param.idle_slope;
|
||||
|
||||
/* The standard uses bps, SAM GMAC uses Bps - convert now */
|
||||
idle_slope /= 8;
|
||||
idle_slope /= 8U;
|
||||
|
||||
return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id,
|
||||
idle_slope);
|
||||
|
|
|
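The hunks above touch literals that are combined with or compared against unsigned variables. A minimal sketch of what the added U suffix buys in such expressions, with illustrative names rather than anything taken from the driver:

```c
#include <stdint.h>

/* Without the suffix, "len - 1" mixes a signed constant with an
 * unsigned variable and relies on an implicit conversion, which
 * MISRA-style essential-type rules flag. Writing 1U keeps the whole
 * expression in unsigned arithmetic; the generated code is the same,
 * only the type of the constant becomes explicit.
 */
static uint32_t last_index(uint32_t len)
{
	return len - 1U;
}

/* Comparisons follow the same rule: test unsigned values against 0U. */
static int ring_is_empty(uint32_t fill_level)
{
	return fill_level == 0U;
}
```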
@@ -81,8 +81,8 @@ static int smsc_mac_regwrite(u8_t reg, u32_t val)

 int smsc_phy_regread(u8_t regoffset, u32_t *data)
 {
-u32_t val = 0;
-u32_t phycmd = 0;
+u32_t val = 0U;
+u32_t phycmd = 0U;
 unsigned int time_out = REG_WRITE_TIMEOUT;

 if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &val) < 0) {
@@ -90,11 +90,11 @@ int smsc_phy_regread(u8_t regoffset, u32_t *data)
 }

 if (val & MAC_MII_ACC_MIIBZY) {
-*data = 0;
+*data = 0U;
 return -EBUSY;
 }

-phycmd = 0;
+phycmd = 0U;
 phycmd |= PHY_ADDR << 11;
 phycmd |= (regoffset & 0x1F) << 6;
 phycmd |= MAC_MII_ACC_READ;
@@ -104,16 +104,16 @@ int smsc_phy_regread(u8_t regoffset, u32_t *data)
 return -1;
 }

-val = 0;
+val = 0U;
 do {
 k_sleep(1);
 time_out--;
 if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &val)) {
 return -1;
 }
-} while (time_out != 0 && (val & MAC_MII_ACC_MIIBZY));
+} while (time_out != 0U && (val & MAC_MII_ACC_MIIBZY));

-if (time_out == 0) {
+if (time_out == 0U) {
 return -ETIMEDOUT;
 }

@@ -126,8 +126,8 @@ int smsc_phy_regread(u8_t regoffset, u32_t *data)

 int smsc_phy_regwrite(u8_t regoffset, u32_t data)
 {
-u32_t val = 0;
-u32_t phycmd = 0;
+u32_t val = 0U;
+u32_t phycmd = 0U;
 unsigned int time_out = REG_WRITE_TIMEOUT;

 if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &val) < 0) {
@@ -157,9 +157,9 @@ int smsc_phy_regwrite(u8_t regoffset, u32_t data)
 if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &phycmd)) {
 return -1;
 }
-} while (time_out != 0 && (phycmd & MAC_MII_ACC_MIIBZY));
+} while (time_out != 0U && (phycmd & MAC_MII_ACC_MIIBZY));

-if (time_out == 0) {
+if (time_out == 0U) {
 return -ETIMEDOUT;
 }

@@ -224,9 +224,9 @@ static int smsc_soft_reset(void)
 do {
 k_sleep(1);
 time_out--;
-} while (time_out != 0 && (SMSC9220->HW_CFG & HW_CFG_SRST));
+} while (time_out != 0U && (SMSC9220->HW_CFG & HW_CFG_SRST));

-if (time_out == 0) {
+if (time_out == 0U) {
 return -1;
 }

@@ -236,7 +236,7 @@ static int smsc_soft_reset(void)
 void smsc_set_txfifo(unsigned int val)
 {
 /* 2kb minimum, 14kb maximum */
-if (val >= 2 && val <= 14) {
+if (val >= 2U && val <= 14U) {
 SMSC9220->HW_CFG = val << 16;
 }
 }
@@ -289,7 +289,7 @@ int smsc_reset_phy(void)
 */
 void smsc_advertise_caps(void)
 {
-u32_t aneg_adv = 0;
+u32_t aneg_adv = 0U;

 smsc_phy_regread(SMSC9220_PHY_ANEG_ADV, &aneg_adv);
 aneg_adv |= 0xDE0;
@@ -300,8 +300,8 @@ void smsc_advertise_caps(void)

 void smsc_establish_link(void)
 {
-u32_t bcr = 0;
-u32_t hw_cfg = 0;
+u32_t bcr = 0U;
+u32_t hw_cfg = 0U;

 smsc_phy_regread(SMSC9220_PHY_BCONTROL, &bcr);
 bcr |= (1 << 12) | (1 << 9);
@@ -321,7 +321,7 @@ inline void smsc_enable_xmit(void)

 void smsc_enable_mac_xmit(void)
 {
-u32_t mac_cr = 0;
+u32_t mac_cr = 0U;

 smsc_mac_regread(SMSC9220_MAC_CR, &mac_cr);

@@ -333,7 +333,7 @@ void smsc_enable_mac_xmit(void)

 void smsc_enable_mac_recv(void)
 {
-u32_t mac_cr = 0;
+u32_t mac_cr = 0U;

 smsc_mac_regread(SMSC9220_MAC_CR, &mac_cr);
 mac_cr |= (1 << 2); /* Recv enable */
@@ -342,7 +342,7 @@ void smsc_enable_mac_recv(void)

 int smsc_init(void)
 {
-unsigned int phyreset = 0;
+unsigned int phyreset = 0U;

 if (smsc_check_id() < 0) {
 return -1;
@@ -455,13 +455,13 @@ static int smsc_write_tx_fifo(const u8_t *buf, u32_t len, bool is_last)
 len = (len + 3) & ~3;
 }

-if ((len & 3) != 0 || len == 0) {
+if ((len & 3) != 0U || len == 0U) {
 LOG_ERR("Chunk size not aligned: %u", len);
 return -1;
 }

 buf32 = (u32_t *)buf;
-len /= 4;
+len /= 4U;
 do {
 SMSC9220->TX_DATA_PORT = *buf32++;
 } while (--len);
@@ -536,9 +536,9 @@ static int smsc_read_rx_fifo(struct net_pkt *pkt, u32_t len)
 {
 u32_t buf32;

-__ASSERT_NO_MSG((len & 3) == 0 && len >= 4);
+__ASSERT_NO_MSG((len & 3) == 0U && len >= 4U);

-len /= 4;
+len /= 4U;

 do {
 buf32 = SMSC9220->RX_DATA_PORT;
@@ -560,7 +560,7 @@ static struct net_pkt *smsc_recv_pkt(struct device *dev, u32_t pkt_size)
 /* Round up to next DWORD size */
 rem_size = (pkt_size + 3) & ~3;
 /* Don't account for FCS when filling net pkt */
-rem_size -= 4;
+rem_size -= 4U;

 pkt = net_pkt_rx_alloc_with_buffer(context->iface, rem_size,
 AF_UNSPEC, 0, K_NO_WAIT);
@@ -616,7 +616,7 @@ static void eth_smsc911x_isr(struct device *dev)
 * pending for as long as there're packets in FIFO. And when
 * there's none, finally acknowledge it.
 */
-if (pkt_pending == 0) {
+if (pkt_pending == 0U) {
 goto done;
 }
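The SMSC9220 hunks above repeat one polling idiom: decrement an unsigned time_out while the hardware reports busy, and treat reaching 0U as a timeout. A self-contained sketch of that pattern, with illustrative names:

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: poll a busy predicate until it clears or the
 * attempt budget runs out. The counter is unsigned, so it is
 * compared against the U-suffixed literal 0U.
 */
static int poll_until_idle(bool (*is_busy)(void), uint32_t attempts)
{
	while (is_busy()) {
		if (attempts == 0U) {
			return -1; /* timed out */
		}
		attempts--;
	}
	return 0; /* device went idle */
}
```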
@@ -41,7 +41,7 @@ static void eth_stellaris_flush(struct device *dev)
 if (dev_data->tx_pos != 0) {
 sys_write32(dev_data->tx_word, REG_MACDATA);
 dev_data->tx_pos = 0;
-dev_data->tx_word = 0;
+dev_data->tx_word = 0U;
 }
 }

@@ -54,7 +54,7 @@ static void eth_stellaris_send_byte(struct device *dev, u8_t byte)
 if (dev_data->tx_pos == 4) {
 sys_write32(dev_data->tx_word, REG_MACDATA);
 dev_data->tx_pos = 0;
-dev_data->tx_word = 0;
+dev_data->tx_word = 0U;
 }
 }

@@ -74,7 +74,7 @@ static int eth_stellaris_send(struct device *dev, struct net_pkt *pkt)

 /* Send the payload */
 for (frag = pkt->frags; frag; frag = frag->frags) {
-for (i = 0; i < frag->len; ++i) {
+for (i = 0U; i < frag->len; ++i) {
 eth_stellaris_send_byte(dev, frag->data[i]);
 }
 }
@@ -150,7 +150,7 @@ static struct net_pkt *eth_stellaris_rx_pkt(struct device *dev,
 * The remaining 2 bytes, in the first word is appended to the
 * ethernet frame.
 */
-count = 2;
+count = 2U;
 data = (u8_t *)&reg_val + 2;
 if (net_pkt_write(pkt, data, count)) {
 goto error;
@@ -162,7 +162,7 @@ static struct net_pkt *eth_stellaris_rx_pkt(struct device *dev,
 /* Read the rest of words, minus the partial word and FCS byte. */
 for (; bytes_left > 7; bytes_left -= 4) {
 reg_val = sys_read32(REG_MACDATA);
-count = 4;
+count = 4U;
 data = (u8_t *)&reg_val;
 if (net_pkt_write(pkt, data, count)) {
 goto error;
@@ -42,7 +42,7 @@ static int mdio_bus_wait(Gmac *gmac)
 u32_t retries = 100U; /* will wait up to 1 s */

 while (!(gmac->GMAC_NSR & GMAC_NSR_IDLE)) {
-if (retries-- == 0) {
+if (retries-- == 0U) {
 LOG_ERR("timeout");
 return -ETIMEDOUT;
 }
@@ -123,7 +123,7 @@ static int phy_soft_reset(const struct phy_sam_gmac_dev *phy)
 * up to 0.5 s.
 */
 do {
-if (retries-- == 0) {
+if (retries-- == 0U) {
 return -ETIMEDOUT;
 }

@@ -223,7 +223,7 @@ int phy_sam_gmac_auto_negotiate(const struct phy_sam_gmac_dev *phy,

 /* Wait for the auto-negotiation process to complete */
 do {
-if (retries-- == 0) {
+if (retries-- == 0U) {
 retval = -ETIMEDOUT;
 goto auto_negotiate_exit;
 }
@@ -133,7 +133,7 @@ static bool write_range_is_valid(off_t offset, u32_t size)
 {
 return read_range_is_valid(offset, size)
 && (offset % sizeof(u32_t) == 0)
-&& (size % 4 == 0);
+&& (size % 4 == 0U);
 }

 static bool read_range_is_valid(off_t offset, u32_t size)

@@ -24,7 +24,7 @@ LOG_MODULE_REGISTER(LOG_DOMAIN);
 bool flash_stm32_valid_range(struct device *dev, off_t offset, u32_t len,
 bool write)
 {
-return (!write || (offset % 2 == 0 && len % 2 == 0)) &&
+return (!write || (offset % 2 == 0 && len % 2 == 0U)) &&
 flash_stm32_range_exists(dev, offset, len);
 }

@@ -134,7 +134,7 @@ int flash_stm32_write_range(struct device *dev, unsigned int offset,
 {
 int i, rc = 0;

-for (i = 0; i < len; i += 2, offset += 2) {
+for (i = 0; i < len; i += 2, offset += 2U) {
 rc = write_hword(dev, offset, ((const u16_t *) data)[i>>1]);
 if (rc < 0) {
 return rc;

@@ -24,7 +24,7 @@ LOG_MODULE_REGISTER(LOG_DOMAIN);
 bool flash_stm32_valid_range(struct device *dev, off_t offset, u32_t len,
 bool write)
 {
-return (!write || (offset % 2 == 0 && len % 2 == 0)) &&
+return (!write || (offset % 2 == 0 && len % 2 == 0U)) &&
 flash_stm32_range_exists(dev, offset, len);
 }

@@ -131,7 +131,7 @@ int flash_stm32_write_range(struct device *dev, unsigned int offset,
 {
 int i, rc = 0;

-for (i = 0; i < len; i += 2, offset += 2) {
+for (i = 0; i < len; i += 2, offset += 2U) {
 rc = write_hword(dev, offset, ((const u16_t *) data)[i>>1]);
 if (rc < 0) {
 return rc;

@@ -77,11 +77,11 @@ static int erase_sector(struct device *dev, u32_t sector)
 #if defined(FLASH_OPTCR_nDBANK) && FLASH_SECTOR_TOTAL == 24
 #if CONFIG_FLASH_SIZE == 2048
 if (sector > 11) {
-sector += 4;
+sector += 4U;
 }
 #elif CONFIG_FLASH_SIZE == 1024
 if (sector > 7) {
-sector += 8;
+sector += 8U;
 }
 #endif /* CONFIG_FLASH_SIZE */
 #endif /* defined(FLASH_OPTCR_nDBANK) && FLASH_SECTOR_TOTAL == 24 */

@@ -30,7 +30,7 @@ LOG_MODULE_REGISTER(LOG_DOMAIN);
 bool flash_stm32_valid_range(struct device *dev, off_t offset, u32_t len,
 bool write)
 {
-return (!write || (offset % 8 == 0 && len % 8 == 0)) &&
+return (!write || (offset % 8 == 0 && len % 8 == 0U)) &&
 flash_stm32_range_exists(dev, offset, len);
 }

@@ -132,7 +132,7 @@ static int erase_page(struct device *dev, unsigned int page)
 #ifdef FLASH_CR_BKER
 regs->cr &= ~FLASH_CR_BKER_Msk;
 /* Select bank, only for DUALBANK devices */
-if (page >= 256)
+if (page >= 256U)
 regs->cr |= FLASH_CR_BKER;
 #endif
 regs->cr &= ~FLASH_CR_PNB_Msk;
@@ -173,7 +173,7 @@ int flash_stm32_write_range(struct device *dev, unsigned int offset,
 {
 int i, rc = 0;

-for (i = 0; i < len; i += 8, offset += 8) {
+for (i = 0; i < len; i += 8, offset += 8U) {
 rc = write_dword(dev, offset,
 UNALIGNED_GET((const u64_t *) data + (i >> 3)));
 if (rc < 0) {

@@ -87,7 +87,7 @@ static int flash_nios2_qspi_erase(struct device *dev, off_t offset, size_t len)
 for (i = offset/qspi_dev->sector_size;
 i < qspi_dev->number_of_sectors; i++) {

-if ((remaining_length <= 0) ||
+if ((remaining_length <= 0U) ||
 erase_offset >= (offset + len)) {
 break;
 }
@@ -280,7 +280,7 @@ static int flash_nios2_qspi_write(struct device *dev, off_t offset,
 for (i = offset/qspi_dev->sector_size;
 i < qspi_dev->number_of_sectors; i++) {

-if (remaining_length <= 0) {
+if (remaining_length <= 0U) {
 break;
 }
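The flash hunks above all gate writes on the same two conditions: the offset and length must be multiples of the device's programming unit, and the range must actually exist on the part. A sketch of the alignment half of that check, assuming a hypothetical 8-byte programming unit:

```c
#include <stdbool.h>
#include <stdint.h>

#define WRITE_UNIT 8U /* hypothetical programming granularity */

/* Both remainders are computed in unsigned arithmetic and compared
 * against 0U, mirroring the valid-range checks above.
 */
static bool write_is_aligned(uint32_t offset, uint32_t len)
{
	return ((offset % WRITE_UNIT) == 0U) && ((len % WRITE_UNIT) == 0U);
}
```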
@@ -139,7 +139,7 @@ static inline int dw_base_to_block_base(u32_t base_addr)
 }
 static inline int dw_derive_port_from_base(u32_t base_addr)
 {
-u32_t port = (base_addr & 0x3f) / 12;
+u32_t port = (base_addr & 0x3f) / 12U;
 return port;
 }
 static inline int dw_interrupt_support(const struct gpio_dw_config *config)

@@ -158,7 +158,7 @@ static int gpio_intel_apl_isr(struct device *dev)
 reg = cfg->reg_base + REG_GPI_INT_STS_BASE
 + ((cfg->pin_offset >> 5) << 2);
 int_sts = sys_read32(reg);
-acc_mask = 0;
+acc_mask = 0U;

 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&data->cb, cb, tmp, node) {
 cur_mask = int_sts & cb->pin_mask;
@@ -220,7 +220,7 @@ static int gpio_intel_apl_config(struct device *dev, int access_op,
 }

 /* read in pad configuration register */
-reg = cfg->reg_base + data->pad_base + (raw_pin * 8);
+reg = cfg->reg_base + data->pad_base + (raw_pin * 8U);
 cfg0 = sys_read32(reg);
 cfg1 = sys_read32(reg + 4);

@@ -304,7 +304,7 @@ static int gpio_intel_apl_write(struct device *dev, int access_op,
 return -EPERM;
 }

-reg = cfg->reg_base + data->pad_base + (raw_pin * 8);
+reg = cfg->reg_base + data->pad_base + (raw_pin * 8U);
 val = sys_read32(reg);

 if (value) {
@@ -340,7 +340,7 @@ static int gpio_intel_apl_read(struct device *dev, int access_op,
 return -EPERM;
 }

-reg = cfg->reg_base + data->pad_base + (raw_pin * 8);
+reg = cfg->reg_base + data->pad_base + (raw_pin * 8U);
 val = sys_read32(reg);

 if (!(val & PAD_CFG0_TXDIS)) {

@@ -49,7 +49,7 @@ static int gpio_mcux_configure(struct device *dev,
 }

 /* Check if GPIO port supports interrupts */
-if ((flags & GPIO_INT) && ((config->flags & GPIO_INT) == 0)) {
+if ((flags & GPIO_INT) && ((config->flags & GPIO_INT) == 0U)) {
 return -ENOTSUP;
 }

@@ -105,7 +105,7 @@ static int gpiote_pin_int_cfg(struct device *port, u32_t pin)

 if (data->double_edge & BIT(pin)) {
 pol = NRF_GPIOTE_POLARITY_TOGGLE;
-} else if (((data->active_level & BIT(pin)) != 0)
+} else if (((data->active_level & BIT(pin)) != 0U)
 ^ ((BIT(pin) & data->inverted) != 0)) {
 pol = NRF_GPIOTE_POLARITY_LOTOHI;
 } else {
@@ -337,7 +337,7 @@ static void cfg_level_pins(struct device *port)
 {
 const struct gpio_nrfx_data *data = get_port_data(port);
 const struct gpio_nrfx_cfg *cfg = get_port_cfg(port);
-u32_t pin = 0;
+u32_t pin = 0U;
 u32_t bit = 1U << pin;
 u32_t level_pins = get_level_pins(port);

@@ -381,7 +381,7 @@ static u32_t check_level_trigger_pins(struct device *port)
 * they appear to have triggered or not. This ensures
 * nobody's requesting DETECT.
 */
-u32_t pin = 0;
+u32_t pin = 0U;
 u32_t bit = 1U << pin;

 while (level_pins) {
@@ -440,7 +440,7 @@ static void gpiote_event_handler(void)
 nrf_gpiote_event_is_set(evt)) {
 u32_t abs_pin = nrf_gpiote_event_pin_get(i);
 /* Divide absolute pin number to port and pin parts. */
-fired_triggers[abs_pin / 32] |= BIT(abs_pin % 32);
+fired_triggers[abs_pin / 32U] |= BIT(abs_pin % 32);
 nrf_gpiote_event_clear(evt);
 }
 }

@@ -35,8 +35,8 @@ static int gpio_rv32m1_configure(struct device *dev,
 GPIO_Type *gpio_base = config->gpio_base;
 PORT_Type *port_base = config->port_base;
 port_interrupt_t port_interrupt = 0;
-u32_t mask = 0;
-u32_t pcr = 0;
+u32_t mask = 0U;
+u32_t pcr = 0U;
 u8_t i;

 /* Check for an invalid pin configuration */
@@ -45,7 +45,7 @@ static int gpio_rv32m1_configure(struct device *dev,
 }

 /* Check if GPIO port supports interrupts */
-if ((flags & GPIO_INT) && ((config->flags & GPIO_INT) == 0)) {
+if ((flags & GPIO_INT) && ((config->flags & GPIO_INT) == 0U)) {
 return -EINVAL;
 }

@@ -120,7 +120,7 @@ static int gpio_rv32m1_configure(struct device *dev,
 port_base->PCR[pin] = (port_base->PCR[pin] & ~mask) | pcr |
 PORT_PCR_MUX(kPORT_MuxAsGpio);
 } else { /* GPIO_ACCESS_BY_PORT */
-for (i = 0; i < ARRAY_SIZE(port_base->PCR); i++) {
+for (i = 0U; i < ARRAY_SIZE(port_base->PCR); i++) {
 port_base->PCR[i] = (port_base->PCR[pin] & ~mask) | pcr
 | PORT_PCR_MUX(kPORT_MuxAsGpio);
 }
@@ -206,7 +206,7 @@ static int gpio_rv32m1_disable_callback(struct device *dev,
 if (access_op == GPIO_ACCESS_BY_PIN) {
 data->pin_callback_enables &= ~BIT(pin);
 } else {
-data->pin_callback_enables = 0;
+data->pin_callback_enables = 0U;
 }

 return 0;

@@ -87,7 +87,7 @@ static int gpio_sam0_write(struct device *dev, int access_op, u32_t pin,
 return -ENOTSUP;
 }

-if (value != 0) {
+if (value != 0U) {
 config->regs->OUTSET.bit.OUTSET = mask;
 } else {
 config->regs->OUTCLR.bit.OUTCLR = mask;

@@ -307,7 +307,7 @@ static int gpio_stm32_write(struct device *dev, int access_op,
 }

 pin = stm32_pinval_get(pin);
-if (value != 0) {
+if (value != 0U) {
 LL_GPIO_SetOutputPin(gpio, pin);
 } else {
 LL_GPIO_ResetOutputPin(gpio, pin);
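One of the GPIO hunks above splits an absolute pin number into a port index and a pin within that port by dividing and taking the remainder by the 32 pins per port. A tiny sketch with a worked value (names are illustrative):

```c
#include <stdint.h>

#define PINS_PER_PORT 32U

/* Example: absolute pin 37 -> port 1, pin 5 within that port. */
static void split_abs_pin(uint32_t abs_pin, uint32_t *port, uint32_t *pin)
{
	*port = abs_pin / PINS_PER_PORT;
	*pin = abs_pin % PINS_PER_PORT;
}
```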
@@ -50,7 +50,7 @@ static inline void _i2c_dw_data_ask(struct device *dev)
 (struct i2c_dw_registers *)dw->base_address;

 /* No more bytes to request, so command queue is no longer needed */
-if (dw->request_bytes == 0) {
+if (dw->request_bytes == 0U) {
 regs->ic_intr_mask.bits.tx_empty = 0U;
 return;
 }
@@ -84,7 +84,7 @@ static inline void _i2c_dw_data_ask(struct device *dev)

 /* After receiving the last byte, send STOP if needed */
 if ((dw->xfr_flags & I2C_MSG_STOP)
-&& (dw->request_bytes == 1)) {
+&& (dw->request_bytes == 1U)) {
 data |= IC_DATA_CMD_STOP;
 }

@@ -110,13 +110,13 @@ static void _i2c_dw_data_read(struct device *dev)
 dw->xfr_len--;
 dw->rx_pending--;

-if (dw->xfr_len == 0) {
+if (dw->xfr_len == 0U) {
 break;
 }
 }

 /* Nothing to receive anymore */
-if (dw->xfr_len == 0) {
+if (dw->xfr_len == 0U) {
 dw->state &= ~I2C_DW_CMD_RECV;
 return;
 }
@@ -132,7 +132,7 @@ static int _i2c_dw_data_send(struct device *dev)
 (struct i2c_dw_registers *)dw->base_address;

 /* Nothing to send anymore, mask the interrupt */
-if (dw->xfr_len == 0) {
+if (dw->xfr_len == 0U) {
 regs->ic_intr_mask.bits.tx_empty = 0U;

 dw->state &= ~I2C_DW_CMD_SEND;
@@ -151,7 +151,7 @@ static int _i2c_dw_data_send(struct device *dev)
 }

 /* Send STOP if needed */
-if ((dw->xfr_len == 1) && (dw->xfr_flags & I2C_MSG_STOP)) {
+if ((dw->xfr_len == 1U) && (dw->xfr_flags & I2C_MSG_STOP)) {
 data |= IC_DATA_CMD_STOP;
 }

@@ -253,7 +253,7 @@ static void i2c_dw_isr(void *arg)
 /* If STOP is not expected, finish processing this
 * message if there is nothing left to do anymore.
 */
-if (((dw->xfr_len == 0)
+if (((dw->xfr_len == 0U)
 && !(dw->xfr_flags & I2C_MSG_STOP))
 || (ret != 0)) {
 goto done;
@@ -461,7 +461,7 @@ static int i2c_dw_transfer(struct device *dev,
 }

 /* Send STOP if this is the last message */
-if (msg_left == 1) {
+if (msg_left == 1U) {
 dw->xfr_flags |= I2C_MSG_STOP;
 }

@@ -139,14 +139,14 @@ static int i2c_esp32_configure_speed(const struct i2c_esp32_config *config,
 return -ENOTSUP;
 }

-period = (APB_CLK_FREQ / freq_hz) / 2;
+period = (APB_CLK_FREQ / freq_hz) / 2U;

 esp32_set_mask32(period << I2C_SCL_LOW_PERIOD_S,
 I2C_SCL_LOW_PERIOD_REG(config->index));
 esp32_set_mask32(period << I2C_SCL_HIGH_PERIOD_S,
 I2C_SCL_HIGH_PERIOD_REG(config->index));

-period /= 2; /* Set hold and setup times to 1/2th of period */
+period /= 2U; /* Set hold and setup times to 1/2th of period */
 esp32_set_mask32(period << I2C_SCL_START_HOLD_TIME_S,
 I2C_SCL_START_HOLD_REG(config->index));
 esp32_set_mask32(period << I2C_SCL_RSTART_SETUP_TIME_S,
@@ -156,7 +156,7 @@ static int i2c_esp32_configure_speed(const struct i2c_esp32_config *config,
 esp32_set_mask32(period << I2C_SCL_STOP_SETUP_TIME_S,
 I2C_SCL_STOP_SETUP_REG(config->index));

-period /= 2; /* Set sample and hold times to 1/4th of period */
+period /= 2U; /* Set sample and hold times to 1/4th of period */
 esp32_set_mask32(period << I2C_SDA_HOLD_TIME_S,
 I2C_SDA_HOLD_REG(config->index));
 esp32_set_mask32(period << I2C_SDA_SAMPLE_TIME_S,
@@ -404,7 +404,7 @@ static int i2c_esp32_read_msg(struct device *dev, u16_t addr,
 * slave device. Divide the read command in two segments as
 * recommended by the ESP32 Technical Reference Manual.
 */
-if (msg.len - to_read <= 1) {
+if (msg.len - to_read <= 1U) {
 /* Read the last byte and explicitly ask for an
 * acknowledgment.
 */

@@ -96,7 +96,7 @@ static void i2c_imx_read(struct device *dev, u8_t *rxBuffer, u8_t rxSize)
 transfer->rxBuff = rxBuffer;
 transfer->rxSize = rxSize;

-if (transfer->rxSize == 1) {
+if (transfer->rxSize == 1U) {
 /* Send Nack */
 I2C_SetAckBit(base, false);
 } else {
@@ -194,7 +194,7 @@ static int i2c_imx_transfer(struct device *dev, struct i2c_msg *msgs,
 while ((I2C_I2SR_REG(base) & i2cStatusBusBusy) && (--timeout)) {
 }

-if (timeout == 0) {
+if (timeout == 0U) {
 return result;
 }

@@ -281,7 +281,7 @@ static void i2c_imx_isr(void *arg)
 transfer->ack =
 !(I2C_GetStatusFlag(base, i2cStatusReceivedAck));

-if (transfer->txSize == 0) {
+if (transfer->txSize == 0U) {
 /* Close I2C interrupt. */
 I2C_SetIntCmd(base, false);
 /* Release I2C Bus. */
@@ -294,7 +294,7 @@ static void i2c_imx_isr(void *arg)
 }
 } else {
 /* Normal read operation. */
-if (transfer->rxSize == 2) {
+if (transfer->rxSize == 2U) {
 /* Send Nack */
 I2C_SetAckBit(base, false);
 } else {
@@ -302,7 +302,7 @@ static void i2c_imx_isr(void *arg)
 I2C_SetAckBit(base, true);
 }

-if (transfer->rxSize == 1) {
+if (transfer->rxSize == 1U) {
 /* Switch back to Tx direction to avoid
 * additional I2C bus read.
 */
@@ -315,7 +315,7 @@ static void i2c_imx_isr(void *arg)
 transfer->rxSize--;

 /* receive finished. */
-if (transfer->rxSize == 0) {
+if (transfer->rxSize == 0U) {
 /* Close I2C interrupt. */
 I2C_SetIntCmd(base, false);
 /* Release I2C Bus. */

@@ -133,7 +133,7 @@ static int i2c_stm32_transfer(struct device *dev, struct i2c_msg *msg,
 *next_msg_flags : 0;

 if (current->len > 255) {
-current->len = 255;
+current->len = 255U;
 current->flags &= ~I2C_MSG_STOP;
 if (next_msg_flags) {
 *next_msg_flags = current->flags &

@@ -35,7 +35,7 @@ static inline void handle_sb(I2C_TypeDef *i2c, struct i2c_stm32_data *data)
 slave = (((saddr & 0x0300) >> 7) & 0xFF);
 u8_t header = slave | HEADER;

-if (data->current.is_restart == 0) {
+if (data->current.is_restart == 0U) {
 data->current.is_restart = 1U;
 } else {
 header |= I2C_REQUEST_READ;
@@ -65,10 +65,10 @@ static inline void handle_addr(I2C_TypeDef *i2c, struct i2c_stm32_data *data)
 }
 }
 if (!data->current.is_write) {
-if (data->current.len == 1) {
+if (data->current.len == 1U) {
 /* Single byte reception: enable NACK and clear POS */
 LL_I2C_AcknowledgeNextData(i2c, LL_I2C_NACK);
-} else if (data->current.len == 2) {
+} else if (data->current.len == 2U) {
 /* 2-byte reception: enable NACK and set POS */
 LL_I2C_AcknowledgeNextData(i2c, LL_I2C_NACK);
 LL_I2C_EnableBitPOS(i2c);
@@ -81,7 +81,7 @@ static inline void handle_txe(I2C_TypeDef *i2c, struct i2c_stm32_data *data)
 {
 if (data->current.len) {
 data->current.len--;
-if (data->current.len == 0) {
+if (data->current.len == 0U) {
 /*
 * This is the last byte to transmit disable Buffer
 * interrupt and wait for a BTF interrupt
@@ -431,10 +431,10 @@ s32_t stm32_i2c_msg_read(struct device *dev, struct i2c_msg *msg,
 }
 }

-if (len == 1) {
+if (len == 1U) {
 /* Single byte reception: enable NACK and set STOP */
 LL_I2C_AcknowledgeNextData(i2c, LL_I2C_NACK);
-} else if (len == 2) {
+} else if (len == 2U) {
 /* 2-byte reception: enable NACK and set POS */
 LL_I2C_AcknowledgeNextData(i2c, LL_I2C_NACK);
 LL_I2C_EnableBitPOS(i2c);

@@ -636,7 +636,7 @@ int stm32_i2c_configure_timing(struct device *dev, u32_t clock)
 break;
 } while (presc < 16);

-if (presc >= 16) {
+if (presc >= 16U) {
 LOG_DBG("I2C:failed to find prescaler value");
 return -EINVAL;
 }

@@ -83,7 +83,7 @@ static int i2c_clk_set(Twi *const twi, u32_t speed)
 cl_div = ((SOC_ATMEL_SAM_MCK_FREQ_HZ / (2 * speed)) - 4)
 / (1 << ck_div);

-if (cl_div <= 255) {
+if (cl_div <= 255U) {
 div_completed = true;
 } else {
 ck_div++;
@@ -168,7 +168,7 @@ static void read_msg_start(Twi *const twi, struct twi_msg *msg, u8_t daddr)
 twi->TWI_MMR = TWI_MMR_MREAD | TWI_MMR_DADR(daddr);

 /* In single data byte read the START and STOP must both be set */
-twi_cr_stop = (msg->len == 1) ? TWI_CR_STOP : 0;
+twi_cr_stop = (msg->len == 1U) ? TWI_CR_STOP : 0;
 /* Start the transfer by sending START condition */
 twi->TWI_CR = TWI_CR_START | twi_cr_stop;

@@ -255,7 +255,7 @@ static void i2c_sam_twi_isr(void *arg)

 msg->buf[msg->idx++] = twi->TWI_RHR;

-if (msg->idx == msg->len - 1) {
+if (msg->idx == msg->len - 1U) {
 /* Send a STOP condition on the TWI */
 twi->TWI_CR = TWI_CR_STOP;
 }

@@ -83,7 +83,7 @@ static int i2c_clk_set(Twihs *const twihs, u32_t speed)
 cl_div = ((SOC_ATMEL_SAM_MCK_FREQ_HZ / (2 * speed)) - 3)
 / (1 << ck_div);

-if (cl_div <= 255) {
+if (cl_div <= 255U) {
 div_completed = true;
 } else {
 ck_div++;
@@ -173,7 +173,7 @@ static void read_msg_start(Twihs *const twihs, struct twihs_msg *msg,
 twihs->TWIHS_IER = TWIHS_IER_RXRDY | TWIHS_IER_TXCOMP | TWIHS_IER_NACK;

 /* In single data byte read the START and STOP must both be set */
-twihs_cr_stop = (msg->len == 1) ? TWIHS_CR_STOP : 0;
+twihs_cr_stop = (msg->len == 1U) ? TWIHS_CR_STOP : 0;
 /* Start the transfer by sending START condition */
 twihs->TWIHS_CR = TWIHS_CR_START | twihs_cr_stop;
 }
@@ -242,7 +242,7 @@ static void i2c_sam_twihs_isr(void *arg)

 msg->buf[msg->idx++] = twihs->TWIHS_RHR;

-if (msg->idx == msg->len - 1) {
+if (msg->idx == msg->len - 1U) {
 /* Send STOP condition */
 twihs->TWIHS_CR = TWIHS_CR_STOP;
 }

@@ -77,7 +77,7 @@ static int i2c_sifive_send_addr(struct device *dev,
 u16_t rw_flag)
 {
 const struct i2c_sifive_cfg *config = dev->config->config_info;
-u8_t command = 0;
+u8_t command = 0U;

 /* Wait for a previous transfer to complete */
 while (i2c_sifive_busy(dev))
@@ -109,7 +109,7 @@ static int i2c_sifive_write_msg(struct device *dev,
 {
 const struct i2c_sifive_cfg *config = dev->config->config_info;
 int rc = 0;
-u8_t command = 0;
+u8_t command = 0U;

 rc = i2c_sifive_send_addr(dev, addr, SF_TX_WRITE);
 if (rc != 0) {
@@ -157,7 +157,7 @@ static int i2c_sifive_read_msg(struct device *dev,
 u16_t addr)
 {
 const struct i2c_sifive_cfg *config = dev->config->config_info;
-u8_t command = 0;
+u8_t command = 0U;

 i2c_sifive_send_addr(dev, addr, SF_TX_READ);

@@ -198,8 +198,8 @@ static int i2c_sifive_read_msg(struct device *dev,
 static int i2c_sifive_configure(struct device *dev, u32_t dev_config)
 {
 const struct i2c_sifive_cfg *config = NULL;
-u32_t i2c_speed = 0;
-u16_t prescale = 0;
+u32_t i2c_speed = 0U;
+u16_t prescale = 0U;

 /* Check for NULL pointers */
 if (dev == NULL) {
@@ -223,10 +223,10 @@ static int i2c_sifive_configure(struct device *dev, u32_t dev_config)
 /* Configure bus frequency */
 switch (I2C_SPEED_GET(dev_config)) {
 case I2C_SPEED_STANDARD:
-i2c_speed = 100000; /* 100 KHz */
+i2c_speed = 100000U; /* 100 KHz */
 break;
 case I2C_SPEED_FAST:
-i2c_speed = 400000; /* 400 KHz */
+i2c_speed = 400000U; /* 400 KHz */
 break;
 case I2C_SPEED_FAST_PLUS:
 case I2C_SPEED_HIGH:
@@ -308,7 +308,7 @@ static int i2c_sifive_transfer(struct device *dev,
 static int i2c_sifive_init(struct device *dev)
 {
 const struct i2c_sifive_cfg *config = dev->config->config_info;
-u32_t dev_config = 0;
+u32_t dev_config = 0U;
 int rc = 0;

 dev_config = (I2C_MODE_MASTER | _i2c_map_dt_bitrate(config->f_bus));
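Several of the I2C hunks above make the same last-byte decision: when a message is down to its final byte and is flagged to end the transfer, the command written for that byte also requests a STOP (or a NACK on reads). A minimal sketch of the check, with an illustrative flag value rather than the real driver constants:

```c
#include <stdbool.h>
#include <stdint.h>

#define MSG_END_WITH_STOP 0x02U /* illustrative flag, not a driver define */

/* True when the byte about to be queued is the last one of a message
 * that should terminate the transfer.
 */
static bool stop_with_this_byte(uint32_t bytes_left, uint32_t msg_flags)
{
	return (bytes_left == 1U) && ((msg_flags & MSG_END_WITH_STOP) != 0U);
}
```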
@@ -339,7 +339,7 @@ static int i2s_cavs_configure(struct device *dev, enum i2s_dir dir,
 return -EINVAL;
 }

-if (i2s_cfg->frame_clk_freq == 0) {
+if (i2s_cfg->frame_clk_freq == 0U) {
 LOG_ERR("Invalid frame_clk_freq %u",
 i2s_cfg->frame_clk_freq);
 return -EINVAL;
@@ -444,8 +444,8 @@ static int i2s_cavs_configure(struct device *dev, enum i2s_dir dir,
 * In addition, double M so that it can be later divided by 2
 * to get an approximately 50% duty cycle clock
 */
-i2s_m = (bit_clk_freq << 1) / 100;
-i2s_n = mclk / 100;
+i2s_m = (bit_clk_freq << 1) / 100U;
+i2s_n = mclk / 100U;

 /* set divider value of 1 which divides the clock by 2 */
 mdiv = 1U;
@@ -521,7 +521,7 @@ static int i2s_cavs_configure(struct device *dev, enum i2s_dir dir,
 mn_div->nval = I2S_MNVAL(i2s_n);

 /* Set up DMA channel parameters */
-word_size_bytes = (word_size_bits + 7) / 8;
+word_size_bytes = (word_size_bits + 7) / 8U;
 dev_data->tx.dma_cfg.source_data_size = word_size_bytes;
 dev_data->tx.dma_cfg.dest_data_size = word_size_bytes;
 dev_data->rx.dma_cfg.source_data_size = word_size_bytes;

@@ -33,7 +33,7 @@ LOG_MODULE_REGISTER(i2s_ll_stm32);

 static unsigned int div_round_closest(u32_t dividend, u32_t divisor)
 {
-return (dividend + (divisor / 2)) / divisor;
+return (dividend + (divisor / 2U)) / divisor;
 }

 /*
@@ -204,7 +204,7 @@ static int i2s_stm32_configure(struct device *dev, enum i2s_dir dir,
 stream->master = false;
 }

-if (i2s_cfg->frame_clk_freq == 0) {
+if (i2s_cfg->frame_clk_freq == 0U) {
 stream->queue_drop(stream);
 memset(&stream->cfg, 0, sizeof(struct i2s_config));
 stream->state = I2S_STATE_NOT_READY;
@@ -223,11 +223,11 @@ static int i2s_stm32_configure(struct device *dev, enum i2s_dir dir,
 }

 /* set I2S Data Format */
-if (i2s_cfg->word_size == 16) {
+if (i2s_cfg->word_size == 16U) {
 LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_16B);
-} else if (i2s_cfg->word_size == 24) {
+} else if (i2s_cfg->word_size == 24U) {
 LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_24B);
-} else if (i2s_cfg->word_size == 32) {
+} else if (i2s_cfg->word_size == 32U) {
 LL_I2S_SetDataFormat(cfg->i2s, LL_I2S_DATAFORMAT_32B);
 } else {
 LOG_ERR("invalid word size");

@@ -338,7 +338,7 @@ static int set_rx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 break;

 case I2S_FMT_DATA_FORMAT_PCM_LONG:
-fslen = num_words * word_size_bits / 2 - 1;
+fslen = num_words * word_size_bits / 2U - 1;

 ssc_rcmr = (pin_rf_en ? SSC_RCMR_START_RF_RISING : 0)
 | SSC_RCMR_STTDLY(0);
@@ -348,7 +348,7 @@ static int set_rx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 break;

 case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
-fslen = num_words * word_size_bits / 2 - 1;
+fslen = num_words * word_size_bits / 2U - 1;

 ssc_rcmr = SSC_RCMR_CKI
 | (pin_rf_en ? SSC_RCMR_START_RF_RISING : 0)
@@ -377,7 +377,7 @@ static int set_rx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 * frame period is an odd number set it to be one bit longer.
 */
 ssc_rcmr |= (pin_rf_en ? 0 : SSC_RCMR_START_TRANSMIT)
-| SSC_RCMR_PERIOD((num_words * word_size_bits + 1) / 2 - 1);
+| SSC_RCMR_PERIOD((num_words * word_size_bits + 1) / 2U - 1);

 /* Receive Clock Mode Register */
 ssc->SSC_RCMR = ssc_rcmr;
@@ -426,7 +426,7 @@ static int set_tx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 break;

 case I2S_FMT_DATA_FORMAT_PCM_LONG:
-fslen = num_words * word_size_bits / 2 - 1;
+fslen = num_words * word_size_bits / 2U - 1;

 ssc_tcmr = SSC_TCMR_CKI
 | SSC_TCMR_START_TF_RISING
@@ -436,7 +436,7 @@ static int set_tx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 break;

 case I2S_FMT_DATA_FORMAT_LEFT_JUSTIFIED:
-fslen = num_words * word_size_bits / 2 - 1;
+fslen = num_words * word_size_bits / 2U - 1;

 ssc_tcmr = SSC_TCMR_START_TF_RISING
 | SSC_TCMR_STTDLY(0);
@@ -457,7 +457,7 @@ static int set_tx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 ? SSC_TCMR_CKS_TK : SSC_TCMR_CKS_MCK)
 | ((i2s_cfg->options & I2S_OPT_BIT_CLK_GATED)
 ? SSC_TCMR_CKO_TRANSFER : SSC_TCMR_CKO_CONTINUOUS)
-| SSC_TCMR_PERIOD((num_words * word_size_bits + 1) / 2 - 1);
+| SSC_TCMR_PERIOD((num_words * word_size_bits + 1) / 2U - 1);

 /* Transmit Clock Mode Register */
 ssc->SSC_TCMR = ssc_tcmr;
@@ -483,19 +483,19 @@ static int set_tx_data_format(const struct i2s_sam_dev_cfg *const dev_cfg,
 /* Calculate number of bytes required to store a word of bit_size length */
 static u8_t get_word_size_bytes(u8_t bit_size)
 {
-u8_t byte_size_min = (bit_size + 7) / 8;
+u8_t byte_size_min = (bit_size + 7) / 8U;
 u8_t byte_size;

-byte_size = (byte_size_min == 3) ? 4 : byte_size_min;
+byte_size = (byte_size_min == 3U) ? 4 : byte_size_min;

 return byte_size;
 }

 static int bit_clock_set(Ssc *const ssc, u32_t bit_clk_freq)
 {
-u32_t clk_div = SOC_ATMEL_SAM_MCK_FREQ_HZ / bit_clk_freq / 2;
+u32_t clk_div = SOC_ATMEL_SAM_MCK_FREQ_HZ / bit_clk_freq / 2U;

-if (clk_div == 0 || clk_div >= (1 << 12)) {
+if (clk_div == 0U || clk_div >= (1 << 12)) {
 LOG_ERR("Invalid bit clock frequency");
 return -EINVAL;
 }
@@ -554,7 +554,7 @@ static int i2s_sam_configure(struct device *dev, enum i2s_dir dir,
 return -EINVAL;
 }

-if (i2s_cfg->frame_clk_freq == 0) {
+if (i2s_cfg->frame_clk_freq == 0U) {
 stream->queue_drop(stream);
 (void)memset(&stream->cfg, 0, sizeof(struct i2s_config));
 stream->state = I2S_STATE_NOT_READY;
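Two small arithmetic idioms recur in the I2S hunks above: rounding a division to the nearest integer by adding half of the divisor first, and rounding a bit width up to whole bytes by adding 7 before dividing by 8. A standalone sketch with a couple of worked values:

```c
#include <stdint.h>

/* Round to nearest: div_nearest(10, 4) == 3, div_nearest(9, 4) == 2. */
static uint32_t div_nearest(uint32_t dividend, uint32_t divisor)
{
	return (dividend + divisor / 2U) / divisor;
}

/* Round bits up to whole bytes: bits_to_bytes(24) == 3, bits_to_bytes(17) == 3. */
static uint32_t bits_to_bytes(uint32_t bits)
{
	return (bits + 7U) / 8U;
}
```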
@@ -282,7 +282,7 @@ static u32_t rf_evaluate_freq_setting(struct cc1200_context *ctx, u32_t chan)
 u32_t rf, lo_div;

 rf = ctx->rf_settings->chan_center_freq0 +
-((chan * (u32_t)ctx->rf_settings->channel_spacing) / 10);
+((chan * (u32_t)ctx->rf_settings->channel_spacing) / 10U);
 lo_div = get_lo_divider(ctx);

 LOG_DBG("Calculating freq for %u KHz RF (%u)", rf, lo_div);
@@ -298,23 +298,23 @@ static u32_t rf_evaluate_freq_setting(struct cc1200_context *ctx, u32_t chan)
 }

 if (hz < 1000) {
-freq_tmp = (hz * lo_div * 65536) / xtal;
+freq_tmp = (hz * lo_div * 65536U) / xtal;
 } else {
-freq_tmp = ((hz * lo_div) / xtal) * 65536;
+freq_tmp = ((hz * lo_div) / xtal) * 65536U;
 }

 rst = freq_tmp % factor;
 freq_tmp /= factor;

-if (factor > 1 && (rst/(factor/10)) > 5) {
+if (factor > 1 && (rst/(factor/10U)) > 5) {
 freq_tmp++;
 }

 freq += freq_tmp;

-factor *= 10;
-mult_10 /= 10;
-xtal /= 10;
+factor *= 10U;
+mult_10 /= 10U;
+xtal /= 10U;
 rf -= hz;
 }

@@ -751,7 +751,7 @@ static int configure_spi(struct device *dev)
 }

 cs_ctrl.gpio_pin = DT_IEEE802154_CC1200_GPIO_SPI_CS_PIN;
-cs_ctrl.delay = 0;
+cs_ctrl.delay = 0U;

 cc1200->spi_cfg.cs = &cs_ctrl;

@@ -574,12 +574,12 @@ static inline void insert_radio_noise_details(struct net_pkt *pkt, u8_t *buf)
 * lqi = (lqi - 50) * 4
 */
 lqi = buf[1] & CC2520_FCS_CORRELATION;
-if (lqi <= 50) {
+if (lqi <= 50U) {
 lqi = 0U;
-} else if (lqi >= 110) {
+} else if (lqi >= 110U) {
 lqi = 255U;
 } else {
-lqi = (lqi - 50) << 2;
+lqi = (lqi - 50U) << 2;
 }

 net_pkt_set_ieee802154_lqi(pkt, lqi);
@@ -645,7 +645,7 @@ static void cc2520_rx(int arg)
 }

 if (!IS_ENABLED(CONFIG_IEEE802154_RAW_MODE)) {
-pkt_len -= 2;
+pkt_len -= 2U;
 }

 if (!read_rxfifo_content(cc2520, pkt->buffer, pkt_len)) {
@@ -992,7 +992,7 @@ static inline int configure_spi(struct device *dev)
 }

 cs_ctrl.gpio_pin = DT_TI_CC2520_0_CS_GPIO_PIN;
-cs_ctrl.delay = 0;
+cs_ctrl.delay = 0U;

 cc2520->spi_cfg.cs = &cs_ctrl;

@@ -1375,7 +1375,7 @@ static int cc2520_crypto_begin_session(struct device *dev,
 return -EINVAL;
 }

-if (ctx->mode_params.ccm_info.nonce_len != 13) {
+if (ctx->mode_params.ccm_info.nonce_len != 13U) {
 LOG_ERR("Nonce length erroneous (%u)",
 ctx->mode_params.ccm_info.nonce_len);
 return -EINVAL;

@@ -465,7 +465,7 @@ static int kw41z_stop(struct device *dev)

 static u8_t kw41z_convert_lqi(u8_t hw_lqi)
 {
-if (hw_lqi >= 220) {
+if (hw_lqi >= 220U) {
 return 255;
 } else {
 return (51 * hw_lqi) / 44;
@@ -502,8 +502,8 @@ static inline void kw41z_rx(struct kw41z_context *kw41z, u8_t len)
 #if CONFIG_SOC_MKW41Z4
 /* PKT_BUFFER_RX needs to be accessed aligned to 16 bits */
 for (u16_t reg_val = 0, i = 0; i < pkt_len; i++) {
-if (i % 2 == 0) {
-reg_val = ZLL->PKT_BUFFER_RX[i/2];
+if (i % 2 == 0U) {
+reg_val = ZLL->PKT_BUFFER_RX[i/2U];
 buf->data[i] = reg_val & 0xFF;
 } else {
 buf->data[i] = reg_val >> 8;
@@ -514,7 +514,7 @@ static inline void kw41z_rx(struct kw41z_context *kw41z, u8_t len)
 for (u32_t reg_val = 0, i = 0; i < pkt_len; i++) {
 switch (i % 4) {
 case 0:
-reg_val = ZLL->PKT_BUFFER[i/4];
+reg_val = ZLL->PKT_BUFFER[i/4U];
 buf->data[i] = reg_val & 0xFF;
 break;
 case 1:
@@ -704,7 +704,7 @@ static void kw41z_isr(int unused)
 * frame, 1 frame length, 2 frame control,
 * 1 sequence, 2 FCS. Times two to convert to symbols.
 */
-rx_len = rx_len * 2 + 12 + 22 + 2;
+rx_len = rx_len * 2U + 12 + 22 + 2;
 kw41z_tmr3_set_timeout(rx_len);
 }
 restart_rx = 0U;
@@ -765,7 +765,7 @@ static void kw41z_isr(int unused)
 ZLL_IRQSTS_RX_FRAME_LENGTH_SHIFT;

 if (irqsts & ZLL_IRQSTS_RXIRQ_MASK) {
-if (rx_len != 0) {
+if (rx_len != 0U) {
 kw41z_rx(&kw41z_context_data,
 rx_len);
 }

@@ -920,7 +920,7 @@ static int mcr20a_set_channel(struct device *dev, u16_t channel)
 }

 LOG_DBG("%u", channel);
-channel -= 11;
+channel -= 11U;
 buf[0] = set_bits_pll_int0_val(pll_int_lt[channel]);
 buf[1] = (u8_t)pll_frac_lt[channel];
 buf[2] = (u8_t)(pll_frac_lt[channel] >> 8);
@@ -1387,7 +1387,7 @@ static inline int configure_spi(struct device *dev)
 }

 mcr20a->cs_ctrl.gpio_pin = DT_NXP_MCR20A_0_CS_GPIO_PIN;
-mcr20a->cs_ctrl.delay = 0;
+mcr20a->cs_ctrl.delay = 0U;

 mcr20a->spi_cfg.cs = &mcr20a->cs_ctrl;
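The radio hunks above map a raw hardware link-quality reading onto the 0 to 255 range expected by the stack; the CC2520 variant clamps readings below 50 and above 110 and scales by four in between, as its comment spells out. A self-contained sketch of that mapping with one worked value:

```c
#include <stdint.h>

/* Example: a raw correlation value of 80 maps to (80 - 50) * 4 = 120. */
static uint8_t scale_lqi(uint8_t raw)
{
	if (raw <= 50U) {
		return 0U;
	} else if (raw >= 110U) {
		return 255U;
	}
	return (uint8_t)((raw - 50U) << 2);
}
```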
@@ -442,7 +442,7 @@ static int loapic_suspend(struct device *port)
 */
 lvt = LOAPIC_READ(LOAPIC_TIMER + (loapic_irq * 0x10));

-if ((lvt & LOAPIC_LVT_MASKED) == 0) {
+if ((lvt & LOAPIC_LVT_MASKED) == 0U) {
 sys_bitfield_set_bit((mem_addr_t)loapic_suspend_buf,
 loapic_irq);
 }

@@ -188,7 +188,7 @@ void __irq_controller_irq_config(unsigned int vector, unsigned int irq,
 if (irq != CONFIG_MVIC_TIMER_IRQ) {
 _mvic_rte_set(irq, MVIC_IOWIN_MASK | flags);
 } else {
-__ASSERT(flags == 0,
+__ASSERT(flags == 0U,
 "Timer interrupt cannot have triggering flags set");
 }
 }

@@ -160,7 +160,7 @@ static void plic_irq_handler(void *arg)
 * If the IRQ is out of range, call z_irq_spurious.
 * A call to z_irq_spurious will not return.
 */
-if (irq == 0 || irq >= PLIC_IRQS)
+if (irq == 0U || irq >= PLIC_IRQS)
 z_irq_spurious(NULL);

 irq += RISCV_MAX_GENERIC_IRQ;

@@ -52,7 +52,7 @@ static inline int enable(struct device *dev, struct device *isr_dev)

 for (i = 0U; i < config->client_count; i++) {
 if (clients->client[i].isr_dev == isr_dev) {
-clients->client[i].enabled = 1;
+clients->client[i].enabled = 1U;
 irq_enable(config->irq_num);
 return 0;
 }
@@ -84,7 +84,7 @@ static inline int disable(struct device *dev, struct device *isr_dev)

 for (i = 0U; i < config->client_count; i++) {
 if (clients->client[i].isr_dev == isr_dev) {
-clients->client[i].enabled = 0;
+clients->client[i].enabled = 0U;
 if (last_enabled_isr(clients, config->client_count)) {
 irq_disable(config->irq_num);
 }
@@ -76,7 +76,7 @@ static int lp3943_get_led_reg(u32_t *led, u8_t *reg)
 /* Fall through */
 case 7:
 *reg = LP3943_LS1;
-*led -= 4;
+*led -= 4U;
 break;
 case 8:
 case 9:
@@ -84,7 +84,7 @@ static int lp3943_get_led_reg(u32_t *led, u8_t *reg)
 /* Fall through */
 case 11:
 *reg = LP3943_LS2;
-*led -= 8;
+*led -= 8U;
 break;
 case 12:
 case 13:
@@ -92,7 +92,7 @@ static int lp3943_get_led_reg(u32_t *led, u8_t *reg)
 /* Fall through */
 case 15:
 *reg = LP3943_LS3;
-*led -= 12;
+*led -= 12U;
 break;
 default:
 LOG_ERR("Invalid LED specified");
@@ -151,7 +151,7 @@ static int lp3943_led_blink(struct device *dev, u32_t led,
 reg = LP3943_PSC1;
 }

-val = (period * 255) / dev_data->max_period;
+val = (period * 255U) / dev_data->max_period;
 if (i2c_reg_write_byte(data->i2c, CONFIG_LP3943_I2C_ADDRESS,
 reg, val)) {
 LOG_ERR("LED write failed");
@@ -192,7 +192,7 @@ static int lp3943_led_set_brightness(struct device *dev, u32_t led,
 reg = LP3943_PWM1;
 }

-val = (value * 255) / dev_data->max_brightness;
+val = (value * 255U) / dev_data->max_brightness;
 if (i2c_reg_write_byte(data->i2c, CONFIG_LP3943_I2C_ADDRESS,
 reg, val)) {
 LOG_ERR("LED write failed");

@@ -70,7 +70,7 @@ static int pca9633_led_blink(struct device *dev, u32_t led,
 * (time_on / period) = (GDC / 256) ->
 * GDC = ((time_on * 256) / period)
 */
-gdc = delay_on * 256 / period;
+gdc = delay_on * 256U / period;
 if (i2c_reg_write_byte(data->i2c, CONFIG_PCA9633_I2C_ADDRESS,
 PCA9633_GRPPWM,
 gdc)) {
@@ -84,7 +84,7 @@ static int pca9633_led_blink(struct device *dev, u32_t led,
 * So, period (in ms) = (((GFRQ + 1) / 24) * 1000) ->
 * GFRQ = ((period * 24 / 1000) - 1)
 */
-gfrq = (period * 24 / 1000) - 1;
+gfrq = (period * 24U / 1000) - 1;
 if (i2c_reg_write_byte(data->i2c, CONFIG_PCA9633_I2C_ADDRESS,
 PCA9633_GRPFREQ,
 gfrq)) {
@@ -126,7 +126,7 @@ static int pca9633_led_set_brightness(struct device *dev, u32_t led,
 }

 /* Set the LED brightness value */
-val = (value * 255) / dev_data->max_brightness;
+val = (value * 255U) / dev_data->max_brightness;
 if (i2c_reg_write_byte(data->i2c, CONFIG_PCA9633_I2C_ADDRESS,
 PCA9633_PWM_BASE + led,
 val)) {
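The LED hunks above scale a requested brightness or duty cycle into an 8-bit register value by multiplying by 255 (or 256 for the group duty cycle) and dividing by the configured maximum. A short sketch with a worked value, using illustrative names:

```c
#include <stdint.h>

/* Example: value = 50, max = 100 -> (50 * 255U) / 100 = 127. */
static uint8_t brightness_to_reg(uint32_t value, uint32_t max)
{
	return (uint8_t)((value * 255U) / max);
}
```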
@@ -90,13 +90,13 @@ static void intel_gna_interrupt_handler(struct device *dev)
 pending_resp.response.output_len = pending_req.output_len;
 pending_resp.callback = pending_req.callback;

-pending_resp.response.stats.cycles_per_sec = 200000000;
+pending_resp.response.stats.cycles_per_sec = 200000000U;
 if (regs->gnasts & GNA_STS_STATS_VALID) {
 pending_resp.response.stats.total_cycles = regs->gnaptc;
 pending_resp.response.stats.stall_cycles = regs->gnasc;
 } else {
-pending_resp.response.stats.total_cycles = 0;
-pending_resp.response.stats.stall_cycles = 0;
+pending_resp.response.stats.total_cycles = 0U;
+pending_resp.response.stats.stall_cycles = 0U;
 }

 k_msgq_put(&gna->response_queue, &pending_resp, K_NO_WAIT);