sys: util: migrate all files to DIV_ROUND_UP

`ceiling_fraction` is deprecated; use `DIV_ROUND_UP` instead.

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
This commit is contained in:
Gerard Marull-Paretas 2022-11-23 09:55:08 +01:00 committed by Carles Cufí
parent 53da110dbf
commit 0ebe14beb4
54 changed files with 99 additions and 99 deletions

View file

@ -799,7 +799,7 @@ static int adc_npcx_init(const struct device *dev)
}
/* Configure the ADC clock */
prescaler = ceiling_fraction(data->input_clk, NPCX_ADC_CLK);
prescaler = DIV_ROUND_UP(data->input_clk, NPCX_ADC_CLK);
if (prescaler > 0x40) {
prescaler = 0x40;
}

View file

@ -114,8 +114,8 @@ BUILD_ASSERT(INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 16),
#else
/* For requested SCK < 32 MHz, use divider /2 for HFCLK192M. */
#define BASE_CLOCK_DIV NRF_CLOCK_HFCLK_DIV_2
#define INST_0_SCK_CFG (ceiling_fraction(NRF_QSPI_BASE_CLOCK_FREQ / 2, \
INST_0_SCK_FREQUENCY) - 1)
#define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ / 2, \
INST_0_SCK_FREQUENCY) - 1)
#endif
#else
@ -126,7 +126,7 @@ BUILD_ASSERT(INST_0_SCK_FREQUENCY >= (NRF_QSPI_BASE_CLOCK_FREQ / 16),
#if (INST_0_SCK_FREQUENCY >= NRF_QSPI_BASE_CLOCK_FREQ)
#define INST_0_SCK_CFG NRF_QSPI_FREQ_DIV1
#else
#define INST_0_SCK_CFG (ceiling_fraction(NRF_QSPI_BASE_CLOCK_FREQ, \
#define INST_0_SCK_CFG (DIV_ROUND_UP(NRF_QSPI_BASE_CLOCK_FREQ, \
INST_0_SCK_FREQUENCY) - 1)
#endif
@ -1295,7 +1295,7 @@ static int enter_dpd(const struct device *const dev)
if (t_enter_dpd) {
uint32_t t_enter_dpd_us =
ceiling_fraction(t_enter_dpd, NSEC_PER_USEC);
DIV_ROUND_UP(t_enter_dpd, NSEC_PER_USEC);
k_busy_wait(t_enter_dpd_us);
}
@ -1321,7 +1321,7 @@ static int exit_dpd(const struct device *const dev)
if (t_exit_dpd) {
uint32_t t_exit_dpd_us =
ceiling_fraction(t_exit_dpd, NSEC_PER_USEC);
DIV_ROUND_UP(t_exit_dpd, NSEC_PER_USEC);
k_busy_wait(t_exit_dpd_us);
}

View file

@ -685,10 +685,10 @@ static const struct flash_driver_api spi_flash_at45_api = {
.sector_size = DT_INST_PROP(idx, sector_size), \
.block_size = DT_INST_PROP(idx, block_size), \
.page_size = DT_INST_PROP(idx, page_size), \
.t_enter_dpd = ceiling_fraction( \
.t_enter_dpd = DIV_ROUND_UP( \
DT_INST_PROP(idx, enter_dpd_delay), \
NSEC_PER_USEC), \
.t_exit_dpd = ceiling_fraction( \
.t_exit_dpd = DIV_ROUND_UP( \
DT_INST_PROP(idx, exit_dpd_delay), \
NSEC_PER_USEC), \
.use_udpd = DT_INST_PROP(idx, use_udpd), \

View file

@ -46,17 +46,17 @@ LOG_MODULE_REGISTER(spi_nor, CONFIG_FLASH_LOG_LEVEL);
#define SPI_NOR_MAX_ADDR_WIDTH 4
#if DT_INST_NODE_HAS_PROP(0, t_enter_dpd)
#define T_DP_MS ceiling_fraction(DT_INST_PROP(0, t_enter_dpd), NSEC_PER_MSEC)
#define T_DP_MS DIV_ROUND_UP(DT_INST_PROP(0, t_enter_dpd), NSEC_PER_MSEC)
#else /* T_ENTER_DPD */
#define T_DP_MS 0
#endif /* T_ENTER_DPD */
#if DT_INST_NODE_HAS_PROP(0, t_exit_dpd)
#define T_RES1_MS ceiling_fraction(DT_INST_PROP(0, t_exit_dpd), NSEC_PER_MSEC)
#define T_RES1_MS DIV_ROUND_UP(DT_INST_PROP(0, t_exit_dpd), NSEC_PER_MSEC)
#endif /* T_EXIT_DPD */
#if DT_INST_NODE_HAS_PROP(0, dpd_wakeup_sequence)
#define T_DPDD_MS ceiling_fraction(DT_INST_PROP_BY_IDX(0, dpd_wakeup_sequence, 0), NSEC_PER_MSEC)
#define T_CRDP_MS ceiling_fraction(DT_INST_PROP_BY_IDX(0, dpd_wakeup_sequence, 1), NSEC_PER_MSEC)
#define T_RDP_MS ceiling_fraction(DT_INST_PROP_BY_IDX(0, dpd_wakeup_sequence, 2), NSEC_PER_MSEC)
#define T_DPDD_MS DIV_ROUND_UP(DT_INST_PROP_BY_IDX(0, dpd_wakeup_sequence, 0), NSEC_PER_MSEC)
#define T_CRDP_MS DIV_ROUND_UP(DT_INST_PROP_BY_IDX(0, dpd_wakeup_sequence, 1), NSEC_PER_MSEC)
#define T_RDP_MS DIV_ROUND_UP(DT_INST_PROP_BY_IDX(0, dpd_wakeup_sequence, 2), NSEC_PER_MSEC)
#else /* DPD_WAKEUP_SEQUENCE */
#define T_DPDD_MS 0
#endif /* DPD_WAKEUP_SEQUENCE */

View file

@ -381,7 +381,7 @@ static int fpga_ice40_load_spi(const struct device *dev, uint32_t *image_ptr, ui
LOG_DBG("Send %u clocks", config->leading_clocks);
tx_buf.buf = clock_buf;
tx_buf.len = ceiling_fraction(config->leading_clocks, BITS_PER_BYTE);
tx_buf.len = DIV_ROUND_UP(config->leading_clocks, BITS_PER_BYTE);
ret = spi_write_dt(&config->bus, &tx_bufs);
if (ret < 0) {
LOG_ERR("Failed to send leading %u clocks: %d", config->leading_clocks, ret);
@ -413,7 +413,7 @@ static int fpga_ice40_load_spi(const struct device *dev, uint32_t *image_ptr, ui
LOG_DBG("Send %u clocks", config->trailing_clocks);
tx_buf.buf = clock_buf;
tx_buf.len = ceiling_fraction(config->trailing_clocks, BITS_PER_BYTE);
tx_buf.len = DIV_ROUND_UP(config->trailing_clocks, BITS_PER_BYTE);
ret = spi_write_dt(&config->bus, &tx_bufs);
if (ret < 0) {
LOG_ERR("Failed to send trailing %u clocks: %d", config->trailing_clocks, ret);

View file

@ -46,7 +46,7 @@ static int lpd880x_update(const struct device *dev, void *data, size_t size)
* a zero byte propagates through at most 32 LED driver ICs.
* The LPD8803 is the worst case, at 3 output channels per IC.
*/
uint8_t reset_size = ceiling_fraction(ceiling_fraction(size, 3), 32);
uint8_t reset_size = DIV_ROUND_UP(DIV_ROUND_UP(size, 3), 32);
uint8_t reset_buf[reset_size];
uint8_t last = 0x00;
const struct spi_buf bufs[3] = {

View file

@ -153,7 +153,7 @@ static int sys_mm_drv_hpsram_pwr(uint32_t bank_idx, bool enable, bool non_blocki
static void sys_mm_drv_report_page_usage(void)
{
/* PMC uses 32 KB banks */
uint32_t pmc_banks = ceiling_fraction(used_pages, KB(32) / CONFIG_MM_DRV_PAGE_SIZE);
uint32_t pmc_banks = DIV_ROUND_UP(used_pages, KB(32) / CONFIG_MM_DRV_PAGE_SIZE);
if (used_pmc_banks_reported != pmc_banks) {
if (!adsp_comm_widget_pmc_send_ipc(pmc_banks)) {

View file

@ -80,7 +80,7 @@ static int peci_npcx_configure(const struct device *dev, uint32_t bitrate)
* The unit of the bitrate is in Kbps, need to convert it to bps when
* calculate the divider
*/
bit_rate_divider = ceiling_fraction(data->peci_src_clk_freq, bitrate * 1000 * 4) - 1;
bit_rate_divider = DIV_ROUND_UP(data->peci_src_clk_freq, bitrate * 1000 * 4) - 1;
/*
* Make sure the divider doesn't exceed the max valid value and is not lower than the
* minimal valid value.

View file

@ -113,7 +113,7 @@ static int pwm_npcx_set_cycles(const struct device *dev, uint32_t channel,
* maximum pwm period cycles and won't exceed it.
* Then prescaler = ceil (period_cycles / pwm_max_period_cycles)
*/
prescaler = ceiling_fraction(period_cycles, NPCX_PWM_MAX_PERIOD_CYCLES);
prescaler = DIV_ROUND_UP(period_cycles, NPCX_PWM_MAX_PERIOD_CYCLES);
if (prescaler > NPCX_PWM_MAX_PRESCALER) {
return -EINVAL;
}

View file

@ -181,7 +181,7 @@ static int pca9685_set_cycles(const struct device *dev,
return -EINVAL;
}
pre_scale = ceiling_fraction((int64_t)period_count, PWM_STEPS) - 1;
pre_scale = DIV_ROUND_UP((int64_t)period_count, PWM_STEPS) - 1;
if (pre_scale < PRE_SCALE_MIN) {
LOG_ERR("period_count %u < %u (min)", period_count,
@ -203,7 +203,7 @@ static int pca9685_set_cycles(const struct device *dev,
}
/* Adjust PWM output for the resolution of the PCA9685 */
led_off_count = ceiling_fraction(pulse_count * PWM_STEPS, period_count);
led_off_count = DIV_ROUND_UP(pulse_count * PWM_STEPS, period_count);
buf[0] = ADDR_LED_ON_L(channel);
if (led_off_count == 0) {

View file

@ -53,7 +53,7 @@ LOG_MODULE_REGISTER(itim, LOG_LEVEL_ERR);
/ CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define SYS_CYCLES_PER_USEC (sys_clock_hw_cycles_per_sec() / 1000000)
#define EVT_CYCLES_FROM_TICKS(ticks) \
ceiling_fraction(ticks * EVT_CYCLES_PER_SEC, \
DIV_ROUND_UP(ticks * EVT_CYCLES_PER_SEC, \
CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define NPCX_ITIM_CLK_SEL_DELAY 92 /* Delay for clock selection (Unit:us) */
/* Timeout for enabling ITIM module: 100us (Unit:cycles) */

View file

@ -630,7 +630,7 @@ void sys_clock_set_timeout(int32_t ticks, bool idle)
* the requested ticks have passed starting now.
*/
cyc += unannounced;
cyc = ceiling_fraction(cyc, CYC_PER_TICK) * CYC_PER_TICK;
cyc = DIV_ROUND_UP(cyc, CYC_PER_TICK) * CYC_PER_TICK;
/* Due to elapsed time the calculation above might produce a
* duration that laps the counter. Don't let it.

View file

@ -236,11 +236,11 @@ static int wdt_npcx_setup(const struct device *dev, uint8_t options)
* One clock period of T0 timer is 32/32.768 KHz = 0.976 ms.
* Then the counter value is timeout/0.976 - 1.
*/
inst->TWDT0 = MAX(ceiling_fraction(data->timeout * NPCX_WDT_CLK,
inst->TWDT0 = MAX(DIV_ROUND_UP(data->timeout * NPCX_WDT_CLK,
32 * 1000) - 1, 1);
/* Configure 8-bit watchdog counter */
inst->WDCNT = MIN(ceiling_fraction(data->timeout, 32) +
inst->WDCNT = MIN(DIV_ROUND_UP(data->timeout, 32) +
CONFIG_WDT_NPCX_DELAY_CYCLES, 0xff);
LOG_DBG("WDT setup: TWDT0, WDCNT are %d, %d", inst->TWDT0, inst->WDCNT);

View file

@ -106,7 +106,7 @@ struct bt_mesh_blob_block {
/** Number of chunks in block. */
uint16_t chunk_count;
/** Bitmap of missing chunks. */
uint8_t missing[ceiling_fraction(CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX,
uint8_t missing[DIV_ROUND_UP(CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX,
8)];
};

View file

@ -43,7 +43,7 @@ struct bt_mesh_blob_target_pull {
int64_t block_report_timestamp;
/** Missing chunks reported by this Target node. */
uint8_t missing[ceiling_fraction(CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX, 8)];
uint8_t missing[DIV_ROUND_UP(CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX, 8)];
};
/** BLOB Transfer Client Target node. */

View file

@ -29,7 +29,7 @@ struct bt_mesh_blob_srv;
*/
#if defined(CONFIG_BT_MESH_BLOB_SRV)
#define BT_MESH_BLOB_BLOCKS_MAX \
(ceiling_fraction(CONFIG_BT_MESH_BLOB_SIZE_MAX, \
(DIV_ROUND_UP(CONFIG_BT_MESH_BLOB_SIZE_MAX, \
CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MIN))
#else
#define BT_MESH_BLOB_BLOCKS_MAX 1

View file

@ -182,7 +182,7 @@ struct can_frame {
/** The frame payload data. */
union {
uint8_t data[CAN_MAX_DLEN];
uint32_t data_32[ceiling_fraction(CAN_MAX_DLEN, sizeof(uint32_t))];
uint32_t data_32[DIV_ROUND_UP(CAN_MAX_DLEN, sizeof(uint32_t))];
};
};

View file

@ -157,8 +157,8 @@ enum z_log_msg_mode {
#ifdef CONFIG_LOG_USE_VLA
#define Z_LOG_MSG_ON_STACK_ALLOC(ptr, len) \
long long _ll_buf[ceiling_fraction(len, sizeof(long long))]; \
long double _ld_buf[ceiling_fraction(len, sizeof(long double))]; \
long long _ll_buf[DIV_ROUND_UP(len, sizeof(long long))]; \
long double _ld_buf[DIV_ROUND_UP(len, sizeof(long double))]; \
ptr = (sizeof(long double) == Z_LOG_MSG_ALIGNMENT) ? \
(struct log_msg *)_ld_buf : (struct log_msg *)_ll_buf; \
if (IS_ENABLED(CONFIG_LOG_TEST_CLEAR_MESSAGE_SPACE)) { \
@ -208,7 +208,7 @@ enum z_log_msg_mode {
(offsetof(struct log_msg, data) + pkg_len + (data_len))
#define Z_LOG_MSG_ALIGNED_WLEN(pkg_len, data_len) \
ceiling_fraction(ROUND_UP(Z_LOG_MSG_LEN(pkg_len, data_len), \
DIV_ROUND_UP(ROUND_UP(Z_LOG_MSG_LEN(pkg_len, data_len), \
Z_LOG_MSG_ALIGNMENT), \
sizeof(uint32_t))

View file

@ -215,7 +215,7 @@ struct sys_hashmap_config {
#define SYS_HASHMAP_CONFIG(_max_size, _load_factor) \
{ \
.max_size = (size_t)_max_size, .load_factor = (uint8_t)_load_factor, \
.initial_n_buckets = NHPOT(ceiling_fraction(100, _load_factor)), \
.initial_n_buckets = NHPOT(DIV_ROUND_UP(100, _load_factor)), \
}
/**

View file

@ -205,8 +205,8 @@ static inline int linear_range_get_index(const struct linear_range *r,
if (r->step == 0U) {
*idx = r->min_idx;
} else {
*idx = r->min_idx + ceiling_fraction((uint32_t)(val - r->min),
r->step);
*idx = r->min_idx + DIV_ROUND_UP((uint32_t)(val - r->min),
r->step);
}
return 0;
@ -286,7 +286,7 @@ static inline int linear_range_get_win_index(const struct linear_range *r,
return 0;
}
*idx = r->min_idx + ceiling_fraction((uint32_t)(val_min - r->min), r->step);
*idx = r->min_idx + DIV_ROUND_UP((uint32_t)(val_min - r->min), r->step);
if ((r->min + r->step * (*idx - r->min_idx)) > val_max) {
return -EINVAL;
}

View file

@ -77,7 +77,7 @@ static TIME_CONSTEXPR inline int sys_clock_hw_cycles_per_sec(void)
* @retval false Use algorithm preventing overflow of intermediate value.
*/
#define Z_TMCVT_USE_FAST_ALGO(from_hz, to_hz) \
((ceiling_fraction(CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS * 24ULL * 3600ULL * from_hz, \
((DIV_ROUND_UP(CONFIG_SYS_CLOCK_MAX_TIMEOUT_DAYS * 24ULL * 3600ULL * from_hz, \
UINT32_MAX) * to_hz) <= UINT32_MAX)
/* Time converter generator gadget. Selects from one of three

View file

@ -826,7 +826,7 @@ static char *encode_uint(uint_value_type value,
/* Number of hex "digits" in the fractional part of an IEEE 754-2008
* double precision float.
*/
#define FRACTION_HEX ceiling_fraction(FRACTION_BITS, 4)
#define FRACTION_HEX DIV_ROUND_UP(FRACTION_BITS, 4)
/* Number of bits in the exponent of an IEEE 754-2008 double precision
* float.

View file

@ -50,7 +50,7 @@ int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
}
/* TODO: improve upper bound when hr timers are available */
us = ceiling_fraction(ns, NSEC_PER_USEC);
us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
do {
us = k_usleep(us);
} while (us != 0);

View file

@ -94,7 +94,7 @@ static inline uint8_t model_time_encode(int32_t ms)
continue;
}
uint8_t steps = ceiling_fraction(ms, time_res[i]);
uint8_t steps = DIV_ROUND_UP(ms, time_res[i]);
return steps | (i << 6);
}

View file

@ -126,7 +126,7 @@ static bool nrf53_anomaly_160_check(void)
{
/* System clock cycles needed to cover 200 us window. */
const uint32_t window_cycles =
ceiling_fraction(200 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
DIV_ROUND_UP(200 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
1000000);
static uint32_t timestamps[5];
static bool timestamps_filled;

View file

@ -107,7 +107,7 @@ __imr void hp_sram_init(uint32_t memory_size)
* Calculate total number of used SRAM banks (EBB)
* to power up only necessary banks
*/
ebb_in_use = ceiling_fraction(memory_size, SRAM_BANK_SIZE);
ebb_in_use = DIV_ROUND_UP(memory_size, SRAM_BANK_SIZE);
hp_sram_pm_banks(ebb_in_use);

View file

@ -89,7 +89,7 @@ static inline bool isr_rx_ci_adva_check(uint8_t tx_addr, uint8_t *addr,
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#define PAYLOAD_BASED_FRAG_COUNT \
ceiling_fraction(CONFIG_BT_CTLR_ADV_DATA_LEN_MAX, \
DIV_ROUND_UP(CONFIG_BT_CTLR_ADV_DATA_LEN_MAX, \
PDU_AC_PAYLOAD_SIZE_MAX)
#define PAYLOAD_FRAG_COUNT MAX(PAYLOAD_BASED_FRAG_COUNT, BT_CTLR_DF_PER_ADV_CTE_NUM_MAX)
#define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET

View file

@ -1227,7 +1227,7 @@ static void isr_rx_iso_data_valid(const struct lll_sync_iso *const lll,
stream = ull_sync_iso_lll_stream_get(lll->stream_handle[0]);
iso_meta->timestamp = HAL_TICKER_TICKS_TO_US(radio_tmr_start_get()) +
radio_tmr_aa_restore() +
(ceiling_fraction(lll->ptc_curr, lll->bn) *
(DIV_ROUND_UP(lll->ptc_curr, lll->bn) *
lll->pto * lll->iso_interval *
PERIODIC_INT_UNIT_US) -
addr_us_get(lll->phy) -

View file

@ -478,7 +478,7 @@ static uint32_t calculate_tifs(uint8_t len)
* LE Test packet interval: I(L) = ceil((L + 249) / 625) * 625 us
* where L is an LE Test packet length in microseconds unit.
*/
interval = ceiling_fraction((transmit_time + 249), SCAN_INT_UNIT_US) * SCAN_INT_UNIT_US;
interval = DIV_ROUND_UP((transmit_time + 249), SCAN_INT_UNIT_US) * SCAN_INT_UNIT_US;
return interval - transmit_time;
}

View file

@ -79,7 +79,7 @@ static inline bool isr_rx_ci_adva_check(struct pdu_adv *adv,
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#define PAYLOAD_BASED_FRAG_COUNT \
ceiling_fraction(CONFIG_BT_CTLR_ADV_DATA_LEN_MAX, \
DIV_ROUND_UP(CONFIG_BT_CTLR_ADV_DATA_LEN_MAX, \
PDU_AC_PAYLOAD_SIZE_MAX)
#define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)

View file

@ -101,7 +101,7 @@ static void isr_tx(void *param)
/* LE Test Packet Interval */
l = radio_tmr_end_get() - radio_tmr_ready_get();
i = ceiling_fraction((l + 249), SCAN_INT_UNIT_US) * SCAN_INT_UNIT_US;
i = DIV_ROUND_UP((l + 249), SCAN_INT_UNIT_US) * SCAN_INT_UNIT_US;
t = radio_tmr_end_get() - l + i;
t -= radio_tx_ready_delay_get(test_phy, test_phy_flags);

View file

@ -1221,7 +1221,7 @@ uint8_t ll_adv_enable(uint8_t enable)
interval_min_us = time_us +
(scan_delay + scan_window) * USEC_PER_MSEC;
if ((interval * SCAN_INT_UNIT_US) < interval_min_us) {
interval = ceiling_fraction(interval_min_us,
interval = DIV_ROUND_UP(interval_min_us,
SCAN_INT_UNIT_US);
}
@ -1470,7 +1470,7 @@ uint8_t ll_adv_enable(uint8_t enable)
* BIG radio events.
*/
aux->interval =
ceiling_fraction(((uint64_t)adv->interval *
DIV_ROUND_UP(((uint64_t)adv->interval *
ADV_INT_UNIT_US) +
HAL_TICKER_TICKS_TO_US(
ULL_ADV_RANDOM_DELAY),

View file

@ -592,7 +592,7 @@ uint8_t ll_adv_aux_ad_data_set(uint8_t handle, uint8_t op, uint8_t frag_pref,
* BIG radio events.
*/
aux->interval =
ceiling_fraction(((uint64_t)adv->interval *
DIV_ROUND_UP(((uint64_t)adv->interval *
ADV_INT_UNIT_US) +
HAL_TICKER_TICKS_TO_US(
ULL_ADV_RANDOM_DELAY),

View file

@ -231,7 +231,7 @@ uint8_t ll_big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
1U;
/* BN (Burst Count), Mandatory BN = 1 */
bn = ceiling_fraction(max_sdu, lll_adv_iso->max_pdu) * sdu_per_event;
bn = DIV_ROUND_UP(max_sdu, lll_adv_iso->max_pdu) * sdu_per_event;
if (bn > PDU_BIG_BN_MAX) {
/* Restrict each BIG event to maximum burst per BIG event */
lll_adv_iso->bn = PDU_BIG_BN_MAX;
@ -239,7 +239,7 @@ uint8_t ll_big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
/* Ceil the required burst count per SDU to next maximum burst
* per BIG event.
*/
bn = ceiling_fraction(bn, PDU_BIG_BN_MAX) * PDU_BIG_BN_MAX;
bn = DIV_ROUND_UP(bn, PDU_BIG_BN_MAX) * PDU_BIG_BN_MAX;
} else {
lll_adv_iso->bn = bn;
}

View file

@ -171,7 +171,7 @@ uint8_t ll_cig_parameters_commit(uint8_t cig_id)
* 10 ms 12.5 ms 40 50 25%
*/
iso_interval_us = cig->c_sdu_interval;
cig->iso_interval = ceiling_fraction(iso_interval_us, ISO_INT_UNIT_US);
cig->iso_interval = DIV_ROUND_UP(iso_interval_us, ISO_INT_UNIT_US);
} else {
iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
}
@ -324,7 +324,7 @@ uint8_t ll_cig_parameters_commit(uint8_t cig_id)
if (!cig->central.test) {
#if defined(CONFIG_BT_CTLR_CONN_ISO_LOW_LATENCY_POLICY)
/* Use symmetric flush timeout */
cis->lll.tx.ft = ceiling_fraction(total_time, iso_interval_us);
cis->lll.tx.ft = DIV_ROUND_UP(total_time, iso_interval_us);
cis->lll.rx.ft = cis->lll.tx.ft;
#elif defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
@ -334,26 +334,26 @@ uint8_t ll_cig_parameters_commit(uint8_t cig_id)
* SDU_Interval <= CIG_Sync_Delay
*/
cis->lll.tx.ft =
ceiling_fraction(cig->c_latency - cig->c_sdu_interval -
DIV_ROUND_UP(cig->c_latency - cig->c_sdu_interval -
iso_interval_us, iso_interval_us);
cis->lll.rx.ft =
ceiling_fraction(cig->p_latency - cig->p_sdu_interval -
DIV_ROUND_UP(cig->p_latency - cig->p_sdu_interval -
iso_interval_us, iso_interval_us);
} else {
/* TL = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval.
* SDU_Interval <= CIG_Sync_Delay
*/
cis->lll.tx.ft =
ceiling_fraction(cig->c_latency + cig->c_sdu_interval -
DIV_ROUND_UP(cig->c_latency + cig->c_sdu_interval -
iso_interval_us, iso_interval_us);
cis->lll.rx.ft =
ceiling_fraction(cig->p_latency + cig->p_sdu_interval -
DIV_ROUND_UP(cig->p_latency + cig->p_sdu_interval -
iso_interval_us, iso_interval_us);
}
#else
LL_ASSERT(0);
#endif
cis->lll.nse = ceiling_fraction(se[i].total_count,
cis->lll.nse = DIV_ROUND_UP(se[i].total_count,
cis->lll.tx.ft);
}
@ -757,17 +757,17 @@ static void set_bn_max_pdu(bool framed, uint32_t iso_interval,
* Continuation header = 2 bytes
* MaxDrift (Max. allowed SDU delivery timing drift) = 100 ppm
*/
max_drift = ceiling_fraction(SDU_MAX_DRIFT_PPM * sdu_interval, 1000000U);
ceil_f = ceiling_fraction(iso_interval + max_drift, sdu_interval);
ceil_f_x_max_sdu = ceiling_fraction(max_sdu * (iso_interval + max_drift),
max_drift = DIV_ROUND_UP(SDU_MAX_DRIFT_PPM * sdu_interval, 1000000U);
ceil_f = DIV_ROUND_UP(iso_interval + max_drift, sdu_interval);
ceil_f_x_max_sdu = DIV_ROUND_UP(max_sdu * (iso_interval + max_drift),
sdu_interval);
/* Strategy: Keep lowest possible BN.
* TODO: Implement other strategies, possibly as policies.
*/
max_pdu_bn1 = ceil_f * 5 + ceil_f_x_max_sdu;
*bn = ceiling_fraction(max_pdu_bn1, LL_CIS_OCTETS_TX_MAX);
*max_pdu = ceiling_fraction(max_pdu_bn1, *bn) + 2;
*bn = DIV_ROUND_UP(max_pdu_bn1, LL_CIS_OCTETS_TX_MAX);
*max_pdu = DIV_ROUND_UP(max_pdu_bn1, *bn) + 2;
} else {
/* For unframed, ISO_Interval must be N x SDU_Interval */
LL_ASSERT(iso_interval % sdu_interval == 0);
@ -775,6 +775,6 @@ static void set_bn_max_pdu(bool framed, uint32_t iso_interval,
/* Core 5.3 Vol 6, Part G section 2.1:
* BN >= ceil(Max_SDU/Max_PDU * ISO_Interval/SDU_Interval)
*/
*bn = ceiling_fraction(max_sdu * iso_interval, (*max_pdu) * sdu_interval);
*bn = DIV_ROUND_UP(max_sdu * iso_interval, (*max_pdu) * sdu_interval);
}
}

View file

@ -2159,7 +2159,7 @@ void ull_conn_update_parameters(struct ll_conn *conn, uint8_t is_cu_proc, uint8_
lll->periph.window_widening_periodic_us * instant_latency;
lll->periph.window_widening_periodic_us =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(conn->periph.sca)) *
conn_interval_us), 1000000U);
lll->periph.window_widening_max_us = (conn_interval_us >> 1U) - EVENT_IFS_US;
@ -2242,7 +2242,7 @@ void ull_conn_update_peer_sca(struct ll_conn *conn)
periodic_us = conn_interval_us;
lll->periph.window_widening_periodic_us =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(conn->periph.sca)) *
conn_interval_us), 1000000U);

View file

@ -681,7 +681,7 @@ void ull_conn_iso_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
uint32_t iso_interval_us_frac =
EVENT_US_TO_US_FRAC(cig->iso_interval * CONN_INT_UNIT_US);
cig->lll.window_widening_periodic_us_frac =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(cig->sca_update - 1)) *
iso_interval_us_frac),
1000000U);
@ -1285,7 +1285,7 @@ void ull_conn_iso_transmit_test_cig_interval(uint16_t handle, uint32_t ticks_at_
* on 64-bit sdu_counter:
* (39 bits x 22 bits (4x10^6 us) = 61 bits / 8 bits (255 us) = 53 bits)
*/
sdu_counter = ceiling_fraction((cis->lll.event_count + 1U) * iso_interval,
sdu_counter = DIV_ROUND_UP((cis->lll.event_count + 1U) * iso_interval,
sdu_interval);
if (cis->hdr.test_mode.tx_sdu_counter == 0U) {

View file

@ -190,7 +190,7 @@ void ull_periph_setup(struct node_rx_hdr *rx, struct node_rx_ftr *ftr,
/* calculate the window widening */
conn->periph.sca = pdu_adv->connect_ind.sca;
lll->periph.window_widening_periodic_us =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(conn->periph.sca)) *
conn_interval_us), USEC_PER_SEC);
lll->periph.window_widening_max_us = (conn_interval_us >> 1) -

View file

@ -191,7 +191,7 @@ uint8_t ull_peripheral_iso_acquire(struct ll_conn *acl,
cig->lll.window_widening_max_us = (iso_interval_us >> 1) -
EVENT_IFS_US;
cig->lll.window_widening_periodic_us_frac =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(acl->periph.sca)) *
EVENT_US_TO_US_FRAC(iso_interval_us)), USEC_PER_SEC);

View file

@ -758,7 +758,7 @@ void ull_sync_setup(struct ll_scan_set *scan, struct ll_scan_aux_set *aux,
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
lll->window_widening_periodic_us =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(sca)) *
interval_us), USEC_PER_SEC);
lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;

View file

@ -471,7 +471,7 @@ void ull_sync_iso_setup(struct ll_sync_iso_set *sync_iso,
sca = sync_iso->sync->lll.sca;
lll->window_widening_periodic_us =
ceiling_fraction(((lll_clock_ppm_local_get() +
DIV_ROUND_UP(((lll_clock_ppm_local_get() +
lll_clock_ppm_get(sca)) *
interval_us), USEC_PER_SEC);
lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;

View file

@ -866,7 +866,7 @@ static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
if (chan->chan.ops->alloc_buf) {
/* Auto tune credits to receive a full packet */
chan->rx.init_credits =
ceiling_fraction(chan->rx.mtu,
DIV_ROUND_UP(chan->rx.mtu,
BT_L2CAP_RX_MTU);
} else {
chan->rx.init_credits = L2CAP_LE_MAX_CREDITS;

View file

@ -270,7 +270,7 @@ static void block_set(struct bt_mesh_blob_cli *cli, uint16_t block_idx)
cli->block.size = blob_block_size(cli->xfer->size, cli->xfer->block_size_log,
block_idx);
cli->block.chunk_count =
ceiling_fraction(cli->block.size, cli->xfer->chunk_size);
DIV_ROUND_UP(cli->block.size, cli->xfer->chunk_size);
if (cli->xfer->mode == BT_MESH_BLOB_XFER_MODE_PUSH) {
blob_chunk_missing_set_all(&cli->block);
@ -1352,7 +1352,7 @@ static int handle_block_status(struct bt_mesh_model *mod, struct bt_mesh_msg_ctx
status.block.number = net_buf_simple_pull_le16(buf);
chunk_size = net_buf_simple_pull_le16(buf);
status.block.chunk_count =
ceiling_fraction(cli->block.size, chunk_size);
DIV_ROUND_UP(cli->block.size, chunk_size);
LOG_DBG("status: %u block: %u encoding: %u", status.status,
status.block.number, status.missing);
@ -1531,7 +1531,7 @@ int bt_mesh_blob_cli_send(struct bt_mesh_blob_cli *cli,
cli->xfer = xfer;
cli->inputs = inputs;
cli->io = io;
cli->block_count = ceiling_fraction(cli->xfer->size,
cli->block_count = DIV_ROUND_UP(cli->xfer->size,
(1U << cli->xfer->block_size_log));
block_set(cli, 0);

View file

@ -83,7 +83,7 @@ static int block_start(const struct bt_mesh_blob_io *io,
return err;
}
erase_size = page.size * ceiling_fraction(block->size, page.size);
erase_size = page.size * DIV_ROUND_UP(block->size, page.size);
#else
erase_size = block->size;
#endif

View file

@ -47,7 +47,7 @@ static void suspend(struct bt_mesh_blob_srv *srv);
static inline uint32_t block_count_get(const struct bt_mesh_blob_srv *srv)
{
return ceiling_fraction(srv->state.xfer.size,
return DIV_ROUND_UP(srv->state.xfer.size,
(1U << srv->state.xfer.block_size_log));
}
@ -83,7 +83,7 @@ static void store_state(const struct bt_mesh_blob_srv *srv)
}
/* Convert bit count to byte count: */
uint32_t block_len = ceiling_fraction(block_count_get(srv), 8);
uint32_t block_len = DIV_ROUND_UP(block_count_get(srv), 8);
bt_mesh_model_data_store(
srv->mod, false, NULL, &srv->state,
@ -149,7 +149,7 @@ static int pull_req_max(const struct bt_mesh_blob_srv *srv)
#if defined(CONFIG_BT_MESH_LOW_POWER)
/* No point in requesting more than the friend node can hold: */
if (bt_mesh_lpn_established()) {
uint32_t segments_per_chunk = ceiling_fraction(
uint32_t segments_per_chunk = DIV_ROUND_UP(
BLOB_CHUNK_SDU_LEN(srv->state.xfer.chunk_size),
BT_MESH_APP_SEG_SDU_MAX);
@ -310,7 +310,7 @@ static void xfer_status_rsp(struct bt_mesh_blob_srv *srv,
net_buf_simple_add_u8(&buf, srv->state.xfer.block_size_log);
net_buf_simple_add_le16(&buf, srv->state.mtu_size);
net_buf_simple_add_mem(&buf, srv->state.blocks,
ceiling_fraction(block_count_get(srv), 8));
DIV_ROUND_UP(block_count_get(srv), 8));
send:
ctx->send_ttl = srv->state.ttl;
@ -356,12 +356,12 @@ static void block_status_rsp(struct bt_mesh_blob_srv *srv,
if (format == BT_MESH_BLOB_CHUNKS_MISSING_SOME) {
net_buf_simple_add_mem(&buf, srv->block.missing,
ceiling_fraction(srv->block.chunk_count,
DIV_ROUND_UP(srv->block.chunk_count,
8));
LOG_DBG("Bits: %s",
bt_hex(srv->block.missing,
ceiling_fraction(srv->block.chunk_count, 8)));
DIV_ROUND_UP(srv->block.chunk_count, 8)));
} else if (format == BT_MESH_BLOB_CHUNKS_MISSING_ENCODED) {
int count = pull_req_max(srv);
@ -619,11 +619,11 @@ static int handle_block_start(struct bt_mesh_model *mod, struct bt_mesh_msg_ctx
}
if (!chunk_size || chunk_size > max_chunk_size(srv) ||
(ceiling_fraction((1 << srv->state.xfer.block_size_log), chunk_size) >
(DIV_ROUND_UP((1 << srv->state.xfer.block_size_log), chunk_size) >
max_chunk_count(srv))) {
LOG_WRN("Invalid chunk size: (chunk size: %u, max: %u, ceil: %u, count: %u)",
chunk_size, max_chunk_size(srv),
ceiling_fraction((1 << srv->state.xfer.block_size_log), chunk_size),
DIV_ROUND_UP((1 << srv->state.xfer.block_size_log), chunk_size),
max_chunk_count(srv));
status = BT_MESH_BLOB_ERR_INVALID_CHUNK_SIZE;
goto rsp;
@ -632,7 +632,7 @@ static int handle_block_start(struct bt_mesh_model *mod, struct bt_mesh_msg_ctx
srv->block.size = blob_block_size(
srv->state.xfer.size, srv->state.xfer.block_size_log, block_number);
srv->block.number = block_number;
srv->block.chunk_count = ceiling_fraction(srv->block.size, chunk_size);
srv->block.chunk_count = DIV_ROUND_UP(srv->block.size, chunk_size);
srv->state.xfer.chunk_size = chunk_size;
srv->block.offset = block_number * (1UL << srv->state.xfer.block_size_log);

View file

@ -134,9 +134,9 @@ static uint8_t get_progress(const struct bt_mesh_blob_xfer_info *info)
uint8_t blocks_not_rxed_size;
int i;
total_blocks = ceiling_fraction(info->size, 1U << info->block_size_log);
total_blocks = DIV_ROUND_UP(info->size, 1U << info->block_size_log);
blocks_not_rxed_size = ceiling_fraction(total_blocks, 8);
blocks_not_rxed_size = DIV_ROUND_UP(total_blocks, 8);
for (i = 0; i < blocks_not_rxed_size; i++) {
blocks_not_rxed += info->missing_blocks[i % 8] & (1 << (i % 8));

View file

@ -28,7 +28,7 @@ extern "C" {
* object record at a time so the buffer must be a multiple of object record length.
*/
#define OTS_DIR_LIST_BUFFER_SIZE (DIR_LIST_OBJ_RECORD_MAX_SIZE * \
ceiling_fraction(CONFIG_BT_OTS_L2CAP_CHAN_TX_MTU, DIR_LIST_OBJ_RECORD_MAX_SIZE))
DIV_ROUND_UP(CONFIG_BT_OTS_L2CAP_CHAN_TX_MTU, DIR_LIST_OBJ_RECORD_MAX_SIZE))
struct bt_ots_dir_list {
struct net_buf_simple net_buf;

View file

@ -752,7 +752,7 @@ bool z_log_msg_pending(void)
void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
struct log_msg *log_msg = (struct log_msg *)data;
size_t wlen = ceiling_fraction(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

View file

@ -127,7 +127,7 @@ static atomic_val_t add_drop_msg(void)
union log_frontend_pkt generic_pkt;
struct log_frontend_uart_dropped_pkt *pkt;
size_t len = sizeof(struct log_frontend_uart_dropped_pkt);
size_t wlen = ceiling_fraction(len, sizeof(uint32_t));
size_t wlen = DIV_ROUND_UP(len, sizeof(uint32_t));
if (atomic_cas(&adding_drop, 0, 1) == false) {
return 1;
@ -268,7 +268,7 @@ void log_frontend_msg(const void *source,
size_t dlen = desc.data_len;
bool dev_ready = device_is_ready(dev);
size_t total_len = plen + dlen + sizeof(struct log_frontend_uart_pkt);
size_t total_wlen = ceiling_fraction(total_len, sizeof(uint32_t));
size_t total_wlen = DIV_ROUND_UP(total_len, sizeof(uint32_t));
if (in_panic) {
sync_msg(source, desc, package, data);

View file

@ -477,7 +477,7 @@ static uint32_t dhcpv4_get_timeleft(int64_t start, uint32_t time, int64_t now)
* rounded-up whole seconds until the deadline.
*/
if (deadline > now) {
ret = (uint32_t)ceiling_fraction(deadline - now, MSEC_PER_SEC);
ret = (uint32_t)DIV_ROUND_UP(deadline - now, MSEC_PER_SEC);
}
return ret;

View file

@ -141,7 +141,7 @@ static bool copy_to_pbuffer(struct mpsc_pbuf_buffer *mpsc_buffer,
*/
uint8_t *dst_data = (uint8_t *)dst + sizeof(struct mpsc_pbuf_hdr);
uint8_t *src_data = (uint8_t *)msg + sizeof(struct mpsc_pbuf_hdr);
size_t hdr_wlen = ceiling_fraction(sizeof(struct mpsc_pbuf_hdr),
size_t hdr_wlen = DIV_ROUND_UP(sizeof(struct mpsc_pbuf_hdr),
sizeof(uint32_t));
dst->hdr.data = msg->buf.hdr.data;

View file

@ -96,7 +96,7 @@ ZTEST(blob_io_flash, test_chunk_read)
/* Simulate reading whole partition divided into blocks and chunk of maximum sizes */
while (remaining > 0) {
block.chunk_count =
ceiling_fraction(CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX,
DIV_ROUND_UP(CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX,
CHUNK_SIZE);
block.size = remaining > CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX
? CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX
@ -193,7 +193,7 @@ ZTEST(blob_io_flash, test_chunk_write)
/* Simulate writing whole partition divided into blocks and chunk of maximum sizes */
while (remaining > 0) {
block.chunk_count =
ceiling_fraction(CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX,
DIV_ROUND_UP(CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX,
CHUNK_SIZE);
block.size = remaining > CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX
? CONFIG_BT_MESH_BLOB_BLOCK_SIZE_MAX

View file

@ -814,7 +814,7 @@ static void dfu_cli_inputs_prepare(uint16_t group)
dfu_cli_xfer.targets[i].blob.addr = addr;
if (recover) {
memset(&dfu_cli_xfer.pull[i].missing, 1,
ceiling_fraction(CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX, 8));
DIV_ROUND_UP(CONFIG_BT_MESH_BLOB_CHUNK_COUNT_MAX, 8));
dfu_cli_xfer.targets[i].blob.pull = &dfu_cli_xfer.pull[i];
}

View file

@ -181,7 +181,7 @@ static void common(const uint32_t s, uint32_t ns)
uint64_t actual_ns = k_cyc_to_ns_ceil64((now - then));
uint64_t exp_ns = (uint64_t)s * NSEC_PER_SEC + ns;
/* round up to the nearest microsecond for k_busy_wait() */
exp_ns = ceiling_fraction(exp_ns, NSEC_PER_USEC) * NSEC_PER_USEC;
exp_ns = DIV_ROUND_UP(exp_ns, NSEC_PER_USEC) * NSEC_PER_USEC;
/* lower bounds check */
zassert_true(actual_ns >= exp_ns,