kernel: Remove legacy mem_pool usage

The mailbox and msgq utilities had API variants that could pass old
mem_pool blocks through the data structure.  That API is being
deprecated (and the features were obscure), so remove the internal
support.
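
The buffer-based mailbox API is untouched by this change. A minimal
sketch of the receive pattern that remains, with the mailbox name and
buffer size assumed purely for illustration:

#include <zephyr.h>
#include <sys/printk.h>

/* Illustrative only; not part of this commit. */
K_MBOX_DEFINE(demo_mbox);

static void demo_receive(void)
{
	struct k_mbox_msg rx_msg = { 0 };
	char buffer[64];

	rx_msg.size = sizeof(buffer);     /* maximum bytes we will accept */
	rx_msg.rx_source_thread = K_ANY;

	/* Retrieve the message and copy its data straight into a plain buffer. */
	if (k_mbox_get(&demo_mbox, &rx_msg, buffer, K_FOREVER) == 0) {
		printk("received %u bytes (info=%u)\n",
		       (unsigned int)rx_msg.size,
		       (unsigned int)rx_msg.info);
	}
}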

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross 2020-12-07 05:47:48 -08:00 committed by Anas Nashif
parent ea7ffbde7d
commit c844bd87b3
6 changed files with 0 additions and 265 deletions

View file

@@ -181,9 +181,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
return;
}
/* release sender's memory pool block */
if (rx_msg->tx_block.data != NULL) {
z_mem_pool_free(&rx_msg->tx_block);
rx_msg->tx_block.data = NULL;
}
@@ -351,40 +349,6 @@ void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
mbox_message_dispose(rx_msg);
}
int z_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
struct k_mem_block *block, k_timeout_t timeout)
{
int result;
/* handle case where data is to be discarded */
if (pool == NULL) {
rx_msg->size = 0;
mbox_message_dispose(rx_msg);
return 0;
}
/* handle case where data is already in a memory pool block */
if (rx_msg->tx_block.data != NULL) {
/* give ownership of the block to receiver */
*block = rx_msg->tx_block;
rx_msg->tx_block.data = NULL;
/* now dispose of message */
mbox_message_dispose(rx_msg);
return 0;
}
/* allocate memory pool block (even when message size is 0!) */
result = z_mem_pool_alloc(pool, block, rx_msg->size, timeout);
if (result != 0) {
return result;
}
/* retrieve non-block data into new block, then dispose of message */
k_mbox_data_get(rx_msg, block->data);
return 0;
}
/**
* @brief Handle immediate consumption of received mailbox message data.
*

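Code that relied on z_mbox_data_block_get() to hand the receiver an
owned copy of the data can instead allocate its own memory and copy
into it with k_mbox_data_get(). A minimal sketch, assuming a k_heap
and mailbox defined only for illustration:

#include <zephyr.h>
#include <errno.h>

/* Illustrative only; the heap and mailbox names are assumptions. */
K_HEAP_DEFINE(rx_heap, 1024);
K_MBOX_DEFINE(rx_mbox);

static int receive_into_heap(void **out)
{
	struct k_mbox_msg rx_msg = { 0 };
	void *mem;
	int rc;

	rx_msg.size = 64;                 /* maximum bytes we will accept */
	rx_msg.rx_source_thread = K_ANY;

	/* Take the message descriptor without retrieving the data yet. */
	rc = k_mbox_get(&rx_mbox, &rx_msg, NULL, K_FOREVER);
	if (rc != 0) {
		return rc;
	}

	/* Allocate receiver-owned memory sized to the actual message. */
	mem = k_heap_alloc(&rx_heap, rx_msg.size, K_FOREVER);
	if (mem == NULL) {
		k_mbox_data_get(&rx_msg, NULL);   /* discard the message */
		return -ENOMEM;
	}

	/* Copy the data and dispose of the message in one step. */
	k_mbox_data_get(&rx_msg, mem);
	*out = mem;
	return 0;
}
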
View file

@@ -65,8 +65,6 @@ static void pipe_async_finish(struct k_pipe_async *async_desc)
* to prevent the called routines from scheduling a new thread.
*/
z_mem_pool_free(async_desc->desc.block);
if (async_desc->desc.sem != NULL) {
k_sem_give(async_desc->desc.sem);
}

View file

@@ -61,9 +61,6 @@ K_PIPE_DEFINE(PIPE_NOBUFF, 0, 4);
K_PIPE_DEFINE(PIPE_SMALLBUFF, 256, 4);
K_PIPE_DEFINE(PIPE_BIGBUFF, 4096, 4);
Z_MEM_POOL_DEFINE(DEMOPOOL, 16, 16, 1, 4);
/**
*
* @brief Check for keypress
@@ -135,7 +132,6 @@ void main(void)
sema_test();
mutex_test();
memorymap_test();
mempool_test();
mailbox_test();
pipe_test();
PRINT_STRING("| END OF TESTS "

View file

@@ -1,46 +0,0 @@
/* mempool_b.c */
/*
* Copyright (c) 1997-2010, 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "master.h"
#ifdef MEMPOOL_BENCH
/**
*
* @brief Memory pool get/free test
*
* @return N/A
*/
void mempool_test(void)
{
uint32_t et; /* elapsed time */
int i;
int32_t return_value = 0;
struct k_mem_block block;
PRINT_STRING(dashline, output_file);
et = BENCH_START();
for (i = 0; i < NR_OF_POOL_RUNS; i++) {
return_value |= z_mem_pool_alloc(&DEMOPOOL,
&block,
16,
K_FOREVER);
z_mem_pool_free(&block);
}
et = TIME_STAMP_DELTA_GET(et);
check_result();
if (return_value != 0) {
k_panic();
}
PRINT_F(output_file, FORMAT,
"average alloc and dealloc memory pool block",
SYS_CLOCK_HW_CYCLES_TO_NS_AVG(et, (2 * NR_OF_POOL_RUNS)));
}
#endif /* MEMPOOL_BENCH */
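
For comparison, a roughly equivalent get/free timing loop can be
written against k_heap, which supersedes the legacy mem_pool. A
minimal sketch, with the heap size, block size, and iteration count
assumed purely for illustration:

#include <zephyr.h>
#include <sys/printk.h>

/* Illustrative only; not part of the removed benchmark. */
K_HEAP_DEFINE(demo_heap, 256);

void heap_alloc_free_bench(void)
{
	uint32_t start = k_cycle_get_32();

	for (int i = 0; i < 1000; i++) {
		void *mem = k_heap_alloc(&demo_heap, 16, K_FOREVER);

		if (mem == NULL) {
			k_panic();
		}
		k_heap_free(&demo_heap, mem);
	}

	printk("avg cycles per alloc+free: %u\n",
	       (k_cycle_get_32() - start) / 1000U);
}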

View file

@@ -40,19 +40,12 @@ void test_main(void)
ztest_1cpu_unit_test(test_mbox_put_get_null),
ztest_unit_test(test_mbox_put_get_buffer),
ztest_1cpu_unit_test(test_mbox_async_put_get_buffer),
ztest_1cpu_unit_test(test_mbox_async_put_get_block),
ztest_unit_test(test_mbox_target_source_thread_buffer),
ztest_1cpu_unit_test(test_mbox_target_source_thread_block),
ztest_unit_test(test_mbox_incorrect_receiver_tid),
ztest_1cpu_unit_test(test_mbox_incorrect_transmit_tid),
ztest_1cpu_unit_test(test_mbox_timed_out_mbox_get),
ztest_unit_test(test_mbox_block_get_invalid_pool),
ztest_unit_test(test_mbox_msg_tid_mismatch),
ztest_1cpu_unit_test(test_mbox_block_get_buff_to_pool),
ztest_1cpu_unit_test(
test_mbox_block_get_buff_to_smaller_pool),
ztest_1cpu_unit_test(test_mbox_dispose_size_0_msg),
ztest_1cpu_unit_test(test_mbox_clean_up_tx_pool),
ztest_1cpu_unit_test(test_mbox_async_put_to_waiting_get),
ztest_unit_test(
test_mbox_get_waiting_put_incorrect_tid),

View file

@@ -15,8 +15,6 @@
#define MAIL_LEN 64
/**TESTPOINT: init via K_MBOX_DEFINE*/
K_MBOX_DEFINE(kmbox);
Z_MEM_POOL_DEFINE(mpooltx, 8, MAIL_LEN, 1, 4);
Z_MEM_POOL_DEFINE(mpoolrx, 8, MAIL_LEN, 1, 4);
static struct k_mbox mbox;
@@ -35,17 +33,12 @@ static enum mmsg_type {
ASYNC_PUT_GET_BUFFER,
ASYNC_PUT_GET_BLOCK,
TARGET_SOURCE_THREAD_BUFFER,
TARGET_SOURCE_THREAD_BLOCK,
MAX_INFO_TYPE,
INCORRECT_RECEIVER_TID,
INCORRECT_TRANSMIT_TID,
TIMED_OUT_MBOX_GET,
BLOCK_GET_INVALID_POOL,
MSG_TID_MISMATCH,
BLOCK_GET_BUFF_TO_POOL,
BLOCK_GET_BUFF_TO_SMALLER_POOL,
DISPOSE_SIZE_0_MSG,
CLEAN_UP_TX_POOL,
ASYNC_PUT_TO_WAITING_GET,
GET_WAITING_PUT_INCORRECT_TID,
ASYNC_MULTIPLE_PUT,
@@ -55,15 +48,10 @@ static enum mmsg_type {
static char data[MAX_INFO_TYPE][MAIL_LEN] = {
"send/recv an empty message",
"send/recv msg using a buffer",
"async send/recv msg using a buffer",
"async send/recv msg using a memory block",
"specify target/source thread, using a buffer",
"specify target/source thread, using a memory block"
};
static char big_msg_data[256]
= "Large message buffer, too big for mem_pool to receive";
static void async_put_sema_give(void *p1, void *p2, void *p3)
{
k_sem_give(&sync_sema);
@@ -146,37 +134,12 @@ static void tmbox_put(struct k_mbox *pmbox)
break;
case ASYNC_PUT_GET_BLOCK:
__fallthrough;
case TARGET_SOURCE_THREAD_BLOCK:
/**TESTPOINT: mbox async put mem block*/
mmsg.info = ASYNC_PUT_GET_BLOCK;
mmsg.size = MAIL_LEN;
mmsg.tx_data = NULL;
zassert_equal(z_mem_pool_alloc(&mpooltx, &mmsg.tx_block,
MAIL_LEN, K_NO_WAIT), 0, NULL);
memcpy(mmsg.tx_block.data, data[info_type], MAIL_LEN);
if (info_type == TARGET_SOURCE_THREAD_BLOCK) {
mmsg.tx_target_thread = receiver_tid;
} else {
mmsg.tx_target_thread = K_ANY;
}
k_mbox_async_put(pmbox, &mmsg, &sync_sema);
/*wait for msg being taken*/
k_sem_take(&sync_sema, K_FOREVER);
break;
case INCORRECT_TRANSMIT_TID:
mmsg.tx_target_thread = random_tid;
zassert_true(k_mbox_put(pmbox,
&mmsg,
K_NO_WAIT) == -ENOMSG, NULL);
break;
case BLOCK_GET_INVALID_POOL:
/* To dispose of the rx msg using block get */
mmsg.info = PUT_GET_NULL;
mmsg.size = 0;
mmsg.tx_data = NULL;
mmsg.tx_target_thread = K_ANY;
k_mbox_put(pmbox, &mmsg, K_FOREVER);
break;
case MSG_TID_MISMATCH:
/* keep one msg in the queue and try to get with a wrong tid */
mmsg.info = PUT_GET_NULL;
@@ -186,27 +149,6 @@ static void tmbox_put(struct k_mbox *pmbox)
/* timeout because this msg wont be received with a _get*/
k_mbox_put(pmbox, &mmsg, TIMEOUT);
break;
case BLOCK_GET_BUFF_TO_POOL:
/* copy the tx buffer data onto a pool
* block via data_block_get
*/
mmsg.size = sizeof(data[1]);
mmsg.tx_data = data[1];
mmsg.tx_block.data = NULL;
mmsg.tx_target_thread = K_ANY;
zassert_true(k_mbox_put(pmbox, &mmsg, K_FOREVER) == 0, NULL);
break;
case BLOCK_GET_BUFF_TO_SMALLER_POOL:
/* copy the tx buffer data onto a pool block via data_block_get
* but size is bigger than what the mem_pool can handle at
* that point of time
*/
mmsg.size = sizeof(big_msg_data);
mmsg.tx_data = big_msg_data;
mmsg.tx_block.data = NULL;
mmsg.tx_target_thread = K_ANY;
zassert_true(k_mbox_put(pmbox, &mmsg, TIMEOUT) == 0, NULL);
break;
case DISPOSE_SIZE_0_MSG:
/* Get a msg and dispose it by making the size = 0 */
@@ -217,16 +159,6 @@ static void tmbox_put(struct k_mbox *pmbox)
zassert_true(k_mbox_put(pmbox, &mmsg, K_FOREVER) == 0, NULL);
break;
case CLEAN_UP_TX_POOL:
/* Dispose of tx mem pool once we receive it */
mmsg.size = MAIL_LEN;
mmsg.tx_data = NULL;
zassert_equal(z_mem_pool_alloc(&mpooltx, &mmsg.tx_block,
MAIL_LEN, K_NO_WAIT), 0, NULL);
memcpy(mmsg.tx_block.data, data[0], MAIL_LEN);
mmsg.tx_target_thread = K_ANY;
zassert_true(k_mbox_put(pmbox, &mmsg, K_FOREVER) == 0, NULL);
break;
case ASYNC_PUT_TO_WAITING_GET:
k_sem_take(&sync_sema, K_FOREVER);
mmsg.size = sizeof(data[0]);
@@ -300,7 +232,6 @@ static void tmbox_get(struct k_mbox *pmbox)
{
struct k_mbox_msg mmsg = {0};
char rxdata[MAIL_LEN];
struct k_mem_block rxblock;
switch (info_type) {
case PUT_GET_NULL:
@@ -347,26 +278,6 @@ static void tmbox_get(struct k_mbox *pmbox)
break;
case ASYNC_PUT_GET_BLOCK:
__fallthrough;
case TARGET_SOURCE_THREAD_BLOCK:
/**TESTPOINT: mbox async get mem block*/
mmsg.size = MAIL_LEN;
if (info_type == TARGET_SOURCE_THREAD_BLOCK) {
mmsg.rx_source_thread = sender_tid;
} else {
mmsg.rx_source_thread = K_ANY;
}
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, K_FOREVER) == 0,
NULL);
zassert_true(z_mbox_data_block_get
(&mmsg, &mpoolrx, &rxblock, K_FOREVER) == 0
, NULL);
zassert_equal(mmsg.info, ASYNC_PUT_GET_BLOCK, NULL);
zassert_equal(mmsg.size, MAIL_LEN, NULL);
/*verify rxblock*/
zassert_true(memcmp(rxblock.data, data[info_type], MAIL_LEN)
== 0, NULL);
z_mem_pool_free(&rxblock);
break;
case INCORRECT_RECEIVER_TID:
mmsg.rx_source_thread = random_tid;
zassert_true(k_mbox_get
@@ -378,56 +289,12 @@ static void tmbox_get(struct k_mbox *pmbox)
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, TIMEOUT) == -EAGAIN,
NULL);
break;
case BLOCK_GET_INVALID_POOL:
/* To dispose of the rx msg using block get */
mmsg.rx_source_thread = K_ANY;
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, K_FOREVER) == 0,
NULL);
zassert_true(z_mbox_data_block_get
(&mmsg, NULL, NULL, K_FOREVER) == 0,
NULL);
break;
case MSG_TID_MISMATCH:
mmsg.rx_source_thread = random_tid;
zassert_true(k_mbox_get
(pmbox, &mmsg, NULL, K_NO_WAIT) == -ENOMSG, NULL);
break;
case BLOCK_GET_BUFF_TO_POOL:
/* copy the tx buffer data onto a pool
* block via data_block_get
*/
mmsg.rx_source_thread = K_ANY;
mmsg.size = MAIL_LEN;
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, K_FOREVER) == 0,
NULL);
zassert_true(z_mbox_data_block_get
(&mmsg, &mpoolrx, &rxblock, K_FOREVER) == 0, NULL);
/* verfiy */
zassert_true(memcmp(rxblock.data, data[1], MAIL_LEN)
== 0, NULL);
/* free the block */
z_mem_pool_free(&rxblock);
break;
case BLOCK_GET_BUFF_TO_SMALLER_POOL:
/* copy the tx buffer data onto a smaller
* pool block via data_block_get
*/
mmsg.rx_source_thread = K_ANY;
mmsg.size = sizeof(big_msg_data);
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, K_FOREVER) == 0,
NULL);
zassert_true(z_mbox_data_block_get
(&mmsg, &mpoolrx, &rxblock, K_MSEC(1)) == -EAGAIN,
NULL);
/* Now dispose of the block since the test case finished */
k_mbox_data_get(&mmsg, NULL);
break;
case DISPOSE_SIZE_0_MSG:
mmsg.rx_source_thread = K_ANY;
mmsg.size = 0;
@@ -435,13 +302,6 @@ static void tmbox_get(struct k_mbox *pmbox)
NULL);
break;
case CLEAN_UP_TX_POOL:
mmsg.rx_source_thread = K_ANY;
mmsg.size = 0;
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, K_FOREVER) == 0,
NULL);
break;
case ASYNC_PUT_TO_WAITING_GET:
/* Create a new thread to trigger the semaphore needed for the
@@ -645,12 +505,6 @@ void test_mbox_target_source_thread_buffer(void)
tmbox(&mbox);
}
void test_mbox_target_source_thread_block(void)
{
info_type = TARGET_SOURCE_THREAD_BLOCK;
tmbox(&mbox);
}
void test_mbox_incorrect_receiver_tid(void)
{
info_type = INCORRECT_RECEIVER_TID;
@@ -669,42 +523,18 @@ void test_mbox_timed_out_mbox_get(void)
tmbox(&mbox);
}
void test_mbox_block_get_invalid_pool(void)
{
info_type = BLOCK_GET_INVALID_POOL;
tmbox(&mbox);
}
void test_mbox_msg_tid_mismatch(void)
{
info_type = MSG_TID_MISMATCH;
tmbox(&mbox);
}
void test_mbox_block_get_buff_to_pool(void)
{
info_type = BLOCK_GET_BUFF_TO_POOL;
tmbox(&mbox);
}
void test_mbox_block_get_buff_to_smaller_pool(void)
{
info_type = BLOCK_GET_BUFF_TO_SMALLER_POOL;
tmbox(&mbox);
}
void test_mbox_dispose_size_0_msg(void)
{
info_type = DISPOSE_SIZE_0_MSG;
tmbox(&mbox);
}
void test_mbox_clean_up_tx_pool(void)
{
info_type = CLEAN_UP_TX_POOL;
tmbox(&mbox);
}
void test_mbox_async_put_to_waiting_get(void)
{
info_type = ASYNC_PUT_TO_WAITING_GET;