logging: cAVS HDA based logger

Adds a log backend that maintains a ringbuffer in coordination
with cAVS HDA.

The DMA channel is expected to become available some time after the
logger starts, so a separate step is required to initialize the DMA
channel.
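For illustration, a minimal bring-up sketch; the hook body, the channel
number, and the helper name start_hda_logging are placeholders modeled on
the in-tree sample test rather than anything mandated by the backend API:

#include <logging/log_backend_cavs_hda.h>

/* Log messages are buffered from boot; once the host side has configured
 * and started the HDA stream, hand the DMA channel to the logger in a
 * separate step.
 */
static void my_hook(uint32_t written)
{
	/* Placeholder: notify the host that `written` bytes were flushed */
}

static void start_hda_logging(void)
{
	cavs_hda_log_init(my_hook, 6); /* stream/channel 6, as in the sample test */
}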

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
Tom Burdick 2022-03-02 15:31:11 -06:00, committed by Anas Nashif
commit 6913da9ddd (parent aceefa90ec)
15 changed files with 521 additions and 11 deletions


@@ -26,9 +26,6 @@
#include "dma_cavs_hda.h"
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_cavs_hda_dma, CONFIG_DMA_LOG_LEVEL);
/* Define low level driver required values */
#define HDA_HOST_IN_BASE DT_PROP_BY_IDX(DT_NODELABEL(hda_host_in), reg, 0)
#define HDA_HOST_OUT_BASE DT_PROP_BY_IDX(DT_NODELABEL(hda_host_out), reg, 0)
@@ -257,7 +254,5 @@ int cavs_hda_dma_init(const struct device *dev)
data->ctx.atomic = data->channels_atomic;
data->ctx.magic = DMA_MAGIC;
LOG_INF("Intel cAVS HDA %s initialized", dev->name);
return 0;
}


@@ -9,10 +9,6 @@
#include <drivers/dma.h>
#include "dma_cavs_hda.h"
#define LOG_LEVEL CONFIG_DMA_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(dma_cavs_hda_dma_host_in);
static const struct dma_driver_api cavs_hda_dma_host_in_api = {
.config = cavs_hda_dma_host_in_config,
.reload = cavs_hda_dma_host_reload,


@@ -0,0 +1,32 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_LOG_BACKEND_CAVS_HDA_H_
#define ZEPHYR_LOG_BACKEND_CAVS_HDA_H_
#include <stdint.h>
/**
* @brief HDA logger requires a hook for IPC messages
*
* When the log is flushed and written with DMA, an IPC message should
* be sent to inform the host. This hook function pointer allows for that.
*/
typedef void (*cavs_hda_log_hook_t)(uint32_t written);
/**
* @brief Initialize the cAVS HDA logger
*
* @param hook Function is called after each hda flush in order to
* inform the Host of DMA log data. This hook may be called
* from multiple CPUs and multiple calling contexts concurrently.
* It is up to the author of the hook to serialize if needed.
* It is guaranteed to be called once for every flush.
* @param channel HDA stream (DMA Channel) to use for logging
*/
void cavs_hda_log_init(cavs_hda_log_hook_t hook, uint32_t channel);
#endif /* ZEPHYR_LOG_BACKEND_CAVS_HDA_H_ */
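As an example of the hook contract documented above, a sketch of a hook that
forwards the flushed byte count to the host over IPC; the retry loop and the
(written << 8) | channel encoding mirror the in-tree sample test and are
assumptions of that test setup, not requirements of this header:

#include <cavs_ipc.h>
#include <cavstool.h>
#include <logging/log_backend_cavs_hda.h>

#define HDA_LOG_CHANNEL 6 /* placeholder stream/channel, as in the sample test */

static void hda_log_ipc_hook(uint32_t written)
{
	/* This may run from a timer ISR, so retry without sleeping or
	 * timeouts. Callers needing cross-CPU serialization would add
	 * their own locking here.
	 */
	while (!cavs_ipc_is_complete(CAVS_HOST_DEV)) {
	}

	while (!cavs_ipc_send_message(CAVS_HOST_DEV, IPCCMD_HDA_PRINT,
				      (written << 8) | HDA_LOG_CHANNEL)) {
	}
}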


@@ -46,7 +46,10 @@ enum cavstool_cmd {
IPCCMD_HDA_VALIDATE,
/* Host sends some data */
IPCCMD_HDA_SEND
IPCCMD_HDA_SEND,
/* Host prints some data */
IPCCMD_HDA_PRINT
};
#endif /* ZEPHYR_INCLUDE_CAVS_TEST_H */


@@ -478,7 +478,10 @@ def winstream_read(last_seq):
result += win_read(16, behind - suffix)
(wlen, start1, end, seq1) = win_hdr()
if start1 == start and seq1 == seq:
return (seq, result.decode("utf-8"))
# Best effort attempt at decoding, replacing undecodable characters.
# Found to be useful when decoding really goes wrong.
return (seq, result.decode("utf-8", "replace"))
async def ipc_delay_done():
await asyncio.sleep(0.1)
@@ -527,6 +530,7 @@ def ipc_command(data, ext_data):
elif data == 8: # HDA START
stream_id = ext_data & 0xFF
hda_streams[stream_id].start()
hda_streams[stream_id].mem.seek(0)
elif data == 9: # HDA STOP
stream_id = ext_data & 0xFF
@@ -551,6 +555,18 @@ def ipc_command(data, ext_data):
for i in range(0, 256):
buf[i] = i
hda_streams[stream_id].write(buf)
elif data == 12: # HDA PRINT
log.info("Doing HDA Print")
stream_id = ext_data & 0xFF
buf_len = ext_data >> 8 & 0xFFFF
hda_str = hda_streams[stream_id]
pos = hda_str.mem.tell()
buf_data = hda_str.mem.read(buf_len).decode("utf-8", "replace")
log.info(f"DSP LOG MSG (idx: {pos}, len: {buf_len}): {buf_data}")
pos = hda_str.mem.tell()
if pos >= hda_str.buf_len*2:
log.info(f"Wrapping log reader, pos {pos} len {hda_str.buf_len}")
hda_str.mem.seek(0)
else:
log.warning(f"cavstool: Unrecognized IPC command 0x{data:x} ext 0x{ext_data:x}")
@@ -589,6 +605,7 @@ async def main():
sys.stdout.write("--\n")
hda_streams = dict()
last_seq = 0
while True:
await asyncio.sleep(0.03)


@@ -82,6 +82,11 @@ if(NOT CONFIG_LOG_MODE_MINIMAL)
zephyr_sources_ifdef(
CONFIG_LOG_BACKEND_ADSP
log_backend_adsp.c
)
zephyr_sources_ifdef(
CONFIG_LOG_BACKEND_CAVS_HDA
log_backend_cavs_hda.c
)
if(CONFIG_LOG_BACKEND_SPINEL)


@@ -325,6 +325,41 @@ source "subsys/logging/Kconfig.template.log_format_config"
endif # LOG_BACKEND_ADSP
config LOG_BACKEND_CAVS_HDA
bool "cAVS HDA backend"
depends on SOC_FAMILY_INTEL_ADSP && DMA && DMA_CAVS_HDA && LOG2
help
Provide a logging backend which writes to a buffer and
periodically flushes it to hardware using the ring buffer-like
semantics provided by DMA_CAVS_HDA.
if LOG_BACKEND_CAVS_HDA
backend = CAVS_HDA
backend-str = cavs_hda
source "subsys/logging/Kconfig.template.log_format_config"
config LOG_BACKEND_CAVS_HDA_SIZE
int "Size of ring buffer"
range 128 8192
default 4096
help
Sets the ring buffer size cAVS HDA uses for logging. Effectively
determines how many log messages may be buffered between flushes.
The flush period decides how often the hardware is informed of
writes to the buffer.
config LOG_BACKEND_CAVS_HDA_FLUSH_TIME
int "Time in milliseconds to periodically flush writes to hardware"
range 1 10000
default 500
help
The cAVS HDA backend periodically writes out its buffer contents
to hardware by informing the DMA hardware of new writes to the
ring buffer.
endif # LOG_BACKEND_CAVS_HDA
config LOG_BACKEND_FS
bool "File system backend"
depends on FILE_SYSTEM
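For reference, a sketch of an application configuration fragment combining
these options; the values shown are simply the Kconfig defaults above, while
the in-tree sample test instead uses a 512 byte buffer with a 100 ms flush
period:

CONFIG_LOG=y
CONFIG_LOG_MODE_DEFERRED=y
CONFIG_LOG_BACKEND_CAVS_HDA=y
CONFIG_LOG_BACKEND_CAVS_HDA_SIZE=4096
CONFIG_LOG_BACKEND_CAVS_HDA_FLUSH_TIME=500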


@@ -0,0 +1,292 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "arch/xtensa/cache.h"
#include <logging/log_backend.h>
#include <logging/log_core.h>
#include <logging/log_msg.h>
#include <logging/log_output.h>
#include <logging/log_backend_std.h>
#include <logging/log_backend_cavs_hda.h>
#include <drivers/dma.h>
#include <zephyr/kernel.h>
static uint32_t log_format_current = CONFIG_LOG_BACKEND_CAVS_HDA_OUTPUT_DEFAULT;
static const struct device *hda_log_dev;
static uint32_t hda_log_chan;
/*
* HDA requires 128 byte aligned data and 128 byte aligned transfers.
*/
static __aligned(128) uint8_t hda_log_buf[CONFIG_LOG_BACKEND_CAVS_HDA_SIZE];
static volatile uint32_t hda_log_buffered;
static struct k_spinlock hda_log_lock;
static struct k_timer hda_log_timer;
static cavs_hda_log_hook_t hook;
/* atomic bit flags for state */
#define HDA_LOG_DMA_READY 0
#define HDA_LOG_PANIC_MODE 1
static atomic_t hda_log_flags;
static uint32_t hda_log_flush(void)
{
uint32_t nearest128 = hda_log_buffered & ~((128) - 1);
if (nearest128 > 0) {
hda_log_buffered = hda_log_buffered - nearest128;
#if !(IS_ENABLED(CONFIG_KERNEL_COHERENCE))
z_xtensa_cache_flush(hda_log_buf, CONFIG_LOG_BACKEND_CAVS_HDA_SIZE);
#endif
dma_reload(hda_log_dev, hda_log_chan, 0, 0, nearest128);
}
return nearest128;
}
static int hda_log_out(uint8_t *data, size_t length, void *ctx)
{
int ret;
bool do_log_flush;
struct dma_status status;
k_spinlock_key_t key = k_spin_lock(&hda_log_lock);
/* Defaults when DMA not yet initialized */
uint32_t dma_free = sizeof(hda_log_buf);
uint32_t write_pos = 0;
if (atomic_test_bit(&hda_log_flags, HDA_LOG_DMA_READY)) {
ret = dma_get_status(hda_log_dev, hda_log_chan, &status);
if (ret != 0) {
ret = length;
goto out;
}
/* The hardware tells us what space we have available, and where to
* start writing. If the buffer is full we have no space.
*/
if (status.free <= 0) {
ret = length;
goto out;
}
dma_free = status.free;
write_pos = status.write_position;
}
/* Account for buffered writes since the last dma_reload
*
* No underflow should be possible here: status.free is the apparent
* free space in the buffer from the DMA's read/write positions.
* When dma_reload is called, status.free may be reduced by
* hda_log_buffered rounded down to the nearest multiple of 128,
* and hda_log_buffered is then decremented by the same amount.
* After that, status.free should only increase in value.
*
* Assert this truth though, just in case.
*/
__ASSERT_NO_MSG(dma_free > hda_log_buffered);
uint32_t available = dma_free - hda_log_buffered;
/* If there isn't enough space for the message there's an overflow */
if (available < length) {
ret = length;
goto out;
}
/* Copy over the message to the buffer */
uint32_t idx = write_pos + hda_log_buffered;
if (idx > sizeof(hda_log_buf)) {
idx -= sizeof(hda_log_buf);
}
size_t copy_len = (idx + length) < sizeof(hda_log_buf) ? length : sizeof(hda_log_buf) - idx;
memcpy(&hda_log_buf[idx], data, copy_len);
/* There may be a wrapped copy */
size_t wrap_copy_len = length - copy_len;
if (wrap_copy_len != 0) {
memcpy(&hda_log_buf[0], &data[copy_len], wrap_copy_len);
}
ret = length;
hda_log_buffered += length;
uint32_t written = 0;
out:
/* If DMA_READY changes from unset to set during this call, that is
* perfectly acceptable. The default values for write_pos and dma_free
* are the correct values if that occurs.
*/
do_log_flush = ((hda_log_buffered > sizeof(hda_log_buf)/2) ||
atomic_test_bit(&hda_log_flags, HDA_LOG_PANIC_MODE))
&& atomic_test_bit(&hda_log_flags, HDA_LOG_DMA_READY);
if (do_log_flush) {
written = hda_log_flush();
}
k_spin_unlock(&hda_log_lock, key);
/* The hook may have log calls and needs to be done outside of the spin
* lock to avoid recursion on the spin lock (deadlocks) in cases of
* direct logging.
*/
if (hook != NULL && written > 0) {
hook(written);
}
return ret;
}
/**
* 128 bytes is the smallest transferable size for HDA so use that
* and encompass almost all log lines in the formatter before flushing
* and memcpy'ing to the HDA buffer.
*/
#define LOG_BUF_SIZE 128
static uint8_t log_buf[LOG_BUF_SIZE];
LOG_OUTPUT_DEFINE(log_output_cavs_hda, hda_log_out, log_buf, LOG_BUF_SIZE);
static void hda_log_periodic(struct k_timer *tm)
{
ARG_UNUSED(tm);
k_spinlock_key_t key = k_spin_lock(&hda_log_lock);
uint32_t written = hda_log_flush();
k_spin_unlock(&hda_log_lock, key);
/* The hook may have log calls and needs to be done outside of the spin
* lock to avoid recursion on the spin lock (deadlocks) in cases of
* direct logging.
*/
if (hook != NULL && written > 0) {
hook(written);
}
}
static inline void dropped(const struct log_backend *const backend,
uint32_t cnt)
{
ARG_UNUSED(backend);
log_output_dropped_process(&log_output_cavs_hda, cnt);
}
static void panic(struct log_backend const *const backend)
{
ARG_UNUSED(backend);
/* will immediately flush all future writes once set */
atomic_set_bit(&hda_log_flags, HDA_LOG_PANIC_MODE);
/* flushes the log queue */
log_backend_std_panic(&log_output_cavs_hda);
}
static int format_set(const struct log_backend *const backend, uint32_t log_type)
{
ARG_UNUSED(backend);
log_format_current = log_type;
return 0;
}
static uint32_t format_flags(void)
{
uint32_t flags = LOG_OUTPUT_FLAG_LEVEL | LOG_OUTPUT_FLAG_TIMESTAMP;
if (IS_ENABLED(CONFIG_LOG_BACKEND_FORMAT_TIMESTAMP)) {
flags |= LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP;
}
return flags;
}
static volatile uint32_t counter;
static void process(const struct log_backend *const backend,
union log_msg2_generic *msg)
{
ARG_UNUSED(backend);
log_format_func_t log_output_func = log_format_func_t_get(log_format_current);
log_output_func(&log_output_cavs_hda, &msg->log, format_flags());
}
/**
* Lazily initialized; while the DMA may not be set up we continue
* to buffer log messages until the buffer is full.
*/
static void init(const struct log_backend *const backend)
{
ARG_UNUSED(backend);
hda_log_buffered = 0;
}
const struct log_backend_api log_backend_cavs_hda_api = {
.process = process,
.dropped = IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE) ? NULL : dropped,
.panic = panic,
.format_set = format_set,
.init = init,
};
LOG_BACKEND_DEFINE(log_backend_cavs_hda, log_backend_cavs_hda_api, true);
void cavs_hda_log_init(cavs_hda_log_hook_t fn, uint32_t channel)
{
hook = fn;
int res;
hda_log_dev = device_get_binding("HDA_HOST_IN");
__ASSERT(hda_log_dev, "No valid DMA device found");
hda_log_chan = dma_request_channel(hda_log_dev, &channel);
__ASSERT(hda_log_chan >= 0, "No valid DMA channel");
__ASSERT(hda_log_chan == channel, "Not requested channel");
hda_log_buffered = 0;
/* configure channel */
struct dma_block_config hda_log_dma_blk_cfg = {
.block_size = CONFIG_LOG_BACKEND_CAVS_HDA_SIZE,
.source_address = (uint32_t)(uintptr_t)&hda_log_buf,
};
struct dma_config hda_log_dma_cfg = {
.channel_direction = MEMORY_TO_HOST,
.block_count = 1,
.head_block = &hda_log_dma_blk_cfg,
.source_data_size = 4,
};
res = dma_config(hda_log_dev, hda_log_chan, &hda_log_dma_cfg);
__ASSERT(res == 0, "DMA config failed");
res = dma_start(hda_log_dev, hda_log_chan);
__ASSERT(res == 0, "DMA start failed");
atomic_set_bit(&hda_log_flags, HDA_LOG_DMA_READY);
k_timer_init(&hda_log_timer, hda_log_periodic, NULL);
k_timer_start(&hda_log_timer,
K_MSEC(CONFIG_LOG_BACKEND_CAVS_HDA_FLUSH_TIME),
K_MSEC(CONFIG_LOG_BACKEND_CAVS_HDA_FLUSH_TIME));
printk("hda log initialized\n");
}


@@ -1,2 +1,8 @@
CONFIG_ZTEST=y
CONFIG_DMA=y
#CONFIG_LOG=y
#CONFIG_LOG_MODE_DEFERRED=y
#CONFIG_LOG_BACKEND_ADSP=n
#CONFIG_LOG_BACKEND_CAVS_HDA=y
#CONFIG_LOG_BACKEND_CAVS_HDA_SIZE=8192
#CONFIG_LOG_BACKEND_CAVS_HDA_FLUSH_TIME=100


@@ -0,0 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(intel_adsp_hda_log)
target_sources(app PRIVATE src/main.c src/logger.c)


@@ -0,0 +1,10 @@
CONFIG_ZTEST=y
CONFIG_DMA=y
CONFIG_ASSERT=y
CONFIG_LOG=y
CONFIG_LOG_MODE_DEFERRED=y
CONFIG_LOG_BACKEND_ADSP=n
CONFIG_LOG_BACKEND_CAVS_HDA=y
CONFIG_LOG_BACKEND_CAVS_HDA_SIZE=512
CONFIG_LOG_BACKEND_CAVS_HDA_FLUSH_TIME=100
CONFIG_XTENSA_ENABLE_BACKTRACE=n


@@ -0,0 +1,67 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <logging/log_backend_cavs_hda.h>
#include <logging/log_backend.h>
#include <logging/log_ctrl.h>
#include <kernel.h>
#include <string.h>
#include <ztest.h>
#include <cavs_ipc.h>
#include "tests.h"
#define CHANNEL 6
#define HOST_BUF_SIZE 512
#include <logging/log.h>
LOG_MODULE_REGISTER(hda_test, LOG_LEVEL_DBG);
#define IPC_TIMEOUT K_MSEC(500)
void hda_log_hook(uint32_t written)
{
/* We *must* send this, but we may be in a timer ISR, so we are
* forced into a retry loop without timeouts and such.
*/
bool done = false;
/* Previous message may not be done yet, wait for that */
do {
done = cavs_ipc_is_complete(CAVS_HOST_DEV);
} while (!done);
/* Now send the next one */
do {
done = cavs_ipc_send_message(CAVS_HOST_DEV, IPCCMD_HDA_PRINT,
(written << 8) | CHANNEL);
} while (!done);
}
void test_hda_logger(void)
{
const struct log_backend *hda_log_backend = log_backend_get(0);
zassert_not_null(hda_log_backend, "Expected hda log backend");
hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_RESET, CHANNEL, IPC_TIMEOUT);
hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_CONFIG,
CHANNEL | (HOST_BUF_SIZE << 8), IPC_TIMEOUT);
cavs_hda_log_init(hda_log_hook, CHANNEL);
hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_START, CHANNEL, IPC_TIMEOUT);
hda_ipc_msg(CAVS_HOST_DEV, IPCCMD_HDA_PRINT,
((HOST_BUF_SIZE*2) << 8) | CHANNEL, IPC_TIMEOUT);
printk("Testing log backend\n");
for (int i = 0; i < 512; i++) {
LOG_DBG("test hda log message %d", i);
}
printk("Sleeping to let the log flush\n");
k_sleep(K_MSEC(500));
printk("Done\n");
}


@@ -0,0 +1,16 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <ztest.h>
#include <stdlib.h>
#include "tests.h"
void test_main(void)
{
ztest_test_suite(intel_adsp_hda,
ztest_unit_test(test_hda_logger)
);
ztest_run_test_suite(intel_adsp_hda);
}


@@ -0,0 +1,22 @@
/* Copyright (c) 2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_TESTS_INTEL_ADSP_TESTS_H
#define ZEPHYR_TESTS_INTEL_ADSP_TESTS_H
#include <cavs_ipc.h>
#include <cavstool.h>
#include <stdint.h>
#include <device.h>
#include <ztest.h>
void test_hda_logger(void);
static inline void hda_ipc_msg(const struct device *dev, uint32_t data,
uint32_t ext, k_timeout_t timeout)
{
zassert_true(cavs_ipc_send_message_sync(dev, data, ext, timeout),
"Unexpected ipc send message failure, try increasing IPC_TIMEOUT");
}
#endif /* ZEPHYR_TESTS_INTEL_ADSP_TESTS_H */


@@ -0,0 +1,7 @@
tests:
boards.intel_adsp:
platform_allow: intel_adsp_cavs15 intel_adsp_cavs18 intel_adsp_cavs20 intel_adsp_cavs25
boards.intel_adsp.1cpu:
platform_allow: intel_adsp_cavs15 intel_adsp_cavs18 intel_adsp_cavs20 intel_adsp_cavs25
extra_configs:
- CONFIG_MP_NUM_CPUS=1