rtio: Add callback op

Adds a callback op to RTIO enabling C logic to be interspersed with
I/O operations. This is not safe from userspace but allowable in kernel
space.

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
This commit is contained in:
Tom Burdick 2023-02-17 12:16:05 -06:00 committed by Carles Cufí
parent bb72809326
commit 912e7ff863
5 changed files with 109 additions and 26 deletions

View file

@ -52,9 +52,6 @@ extern "C" {
* @}
*/
struct rtio_iodev;
/**
* @brief RTIO API
* @defgroup rtio_api RTIO API
@ -121,6 +118,23 @@ struct rtio_iodev;
* @}
*/
/** @cond ignore */
struct rtio;
struct rtio_cqe;
struct rtio_sqe;
struct rtio_iodev;
struct rtio_iodev_sqe;
/** @endcond */
/**
 * @typedef rtio_callback_t
 * @brief Callback signature for RTIO_OP_CALLBACK
 *
 * Invoked by the executor when it consumes a submission whose op is
 * RTIO_OP_CALLBACK. Kernel-mode only; not validated for userspace submission.
 *
 * @param r RTIO context being used with the callback
 * @param sqe Submission for the callback op
 * @param arg0 User-supplied argument carried in the sqe (sqe->arg0)
 */
typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
/**
* @brief A submission queue event
*/
@ -147,7 +161,6 @@ struct rtio_sqe {
/** OP_TX, OP_RX */
struct {
uint32_t buf_len; /**< Length of buffer */
uint8_t *buf; /**< Buffer to use*/
};
@ -159,12 +172,19 @@ struct rtio_sqe {
/** OP_CALLBACK */
struct {
void (*callback)(struct rtio *r, struct rtio_sqe *sqe, void *arg0);
void *arg0;
rtio_callback_t callback;
void *arg0; /**< Last argument given to callback */
};
};
};
/** @cond ignore */
/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
/** @endcond */
/**
* @brief Submission queue
*
@ -195,8 +215,6 @@ struct rtio_cq {
struct rtio_cqe buffer[];
};
struct rtio;
struct rtio_iodev_sqe;
struct rtio_executor_api {
/**
@ -341,11 +359,12 @@ struct rtio_iodev {
/** An operation that transmits (writes) */
#define RTIO_OP_TX (RTIO_OP_RX+1)
/** An operation that transmits tiny writes */
/** An operation that transmits tiny writes by copying the data to write */
#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
/** An operation that does some small functional work */
#define RTIO_OP_FUNC (RTIO_OP_TINY_TX+1)
/** An operation that calls a given function (callback) */
#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
/**
@ -427,6 +446,28 @@ static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
sqe->userdata = userdata;
}
/**
 * @brief Prepare a callback op submission
 *
 * A somewhat special operation in that it may only be done in kernel mode.
 *
 * Used where general purpose logic is required in a queue of io operations to do
 * transforms or logic.
 *
 * @param sqe Submission queue entry to initialize
 * @param callback Function to invoke when the executor reaches this sqe
 * @param arg0 Last argument passed to @p callback (stored in sqe->arg0)
 * @param userdata Opaque pointer carried through to the completion event
 */
static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
rtio_callback_t callback,
void *arg0,
void *userdata)
{
sqe->op = RTIO_OP_CALLBACK;
sqe->prio = 0;
sqe->flags = 0;
/* No iodev: the executor itself services this op */
sqe->iodev = NULL;
sqe->callback = callback;
sqe->arg0 = arg0;
sqe->userdata = userdata;
}
/**
* @brief Statically define and initialize a fixed length submission queue.
*

View file

@ -0,0 +1,40 @@
/*
* Copyright (c) 2023 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/rtio/rtio.h>
/**
 * @brief Service a submission aimed at the executor itself
 *
 * Handles ops that have no iodev; currently only RTIO_OP_CALLBACK.
 * Any other op without an iodev completes with -EINVAL.
 */
static void rtio_executor_submit_self(struct rtio_iodev_sqe *iodev_sqe)
{
	const struct rtio_sqe *self_sqe = iodev_sqe->sqe;

	if (self_sqe->op == RTIO_OP_CALLBACK) {
		/* Run the user callback, then complete successfully */
		self_sqe->callback(iodev_sqe->r, self_sqe, self_sqe->arg0);
		rtio_iodev_sqe_ok(iodev_sqe, 0);
		return;
	}

	/* Unknown executor-handled op */
	rtio_iodev_sqe_err(iodev_sqe, -EINVAL);
}
/**
 * @brief Common executor handling of submit
 *
 * Submissions with a NULL iodev pointer imply an executor-handled
 * operation (e.g. a callback); everything else is routed to the iodev.
 * Centralizing this avoids duplicating the check in every executor.
 */
static void rtio_executor_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	if (iodev_sqe->sqe->iodev != NULL) {
		rtio_iodev_submit(iodev_sqe);
		return;
	}

	/* No iodev: the executor services this sqe directly */
	rtio_executor_submit_self(iodev_sqe);
}

View file

@ -12,10 +12,11 @@
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(rtio_executor_concurrent, CONFIG_RTIO_LOG_LEVEL);
#include "rtio_executor_common.h"
#define CONEX_TASK_COMPLETE BIT(0)
#define CONEX_TASK_SUSPENDED BIT(1)
/**
* @file
* @brief Concurrent RTIO Executor
@ -147,7 +148,6 @@ static void conex_prepare(struct rtio *r, struct rtio_concurrent_executor *exc)
exc->last_sqe = last_sqe;
}
/**
* @brief Resume tasks that are suspended
*
@ -161,7 +161,7 @@ static void conex_resume(struct rtio *r, struct rtio_concurrent_executor *exc)
if (exc->task_status[task_id & exc->task_mask] & CONEX_TASK_SUSPENDED) {
LOG_DBG("resuming suspended task %d", task_id);
exc->task_status[task_id & exc->task_mask] &= ~CONEX_TASK_SUSPENDED;
rtio_iodev_submit(&exc->task_cur[task_id & exc->task_mask]);
rtio_executor_submit(&exc->task_cur[task_id & exc->task_mask]);
}
}
}
@ -176,8 +176,7 @@ static void conex_resume(struct rtio *r, struct rtio_concurrent_executor *exc)
int rtio_concurrent_submit(struct rtio *r)
{
struct rtio_concurrent_executor *exc =
(struct rtio_concurrent_executor *)r->executor;
struct rtio_concurrent_executor *exc = (struct rtio_concurrent_executor *)r->executor;
k_spinlock_key_t key;
key = k_spin_lock(&exc->lock);
@ -219,7 +218,7 @@ void rtio_concurrent_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
next_sqe = rtio_spsc_next(r->sq, sqe);
exc->task_cur[task_id].sqe = next_sqe;
rtio_iodev_submit(&exc->task_cur[task_id]);
rtio_executor_submit(&exc->task_cur[task_id]);
} else {
exc->task_status[task_id] |= CONEX_TASK_COMPLETE;
}

View file

@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include "rtio_executor_common.h"
#include <zephyr/rtio/rtio_executor_simple.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/kernel.h>
@ -61,9 +62,7 @@ int rtio_simple_submit(struct rtio *r)
exc->task.sqe = sqe;
exc->task.r = r;
if (sqe != NULL) {
rtio_iodev_submit(&exc->task);
}
rtio_executor_submit(&exc->task);
return 0;
}
@ -144,6 +143,6 @@ void rtio_simple_err(struct rtio_iodev_sqe *iodev_sqe, int result)
iodev_sqe->sqe = rtio_spsc_consume(r->sq);
if (iodev_sqe->sqe != NULL) {
rtio_iodev_submit(iodev_sqe);
rtio_executor_submit(iodev_sqe);
}
}

View file

@ -13,6 +13,8 @@
* the iodev is a valid accessible k_object (if given) and
* the buffer pointers are valid accessible memory by the calling
* thread.
*
* Each op code that is acceptable from user mode must also be validated.
*/
static inline bool rtio_vrfy_sqe(struct rtio_sqe *sqe)
{
@ -34,7 +36,9 @@ static inline bool rtio_vrfy_sqe(struct rtio_sqe *sqe)
case RTIO_OP_TINY_TX:
break;
default:
/* RTIO OP must be known */
/* RTIO OP must be known and allowable from user mode
* otherwise it is invalid
*/
valid_sqe = false;
}