pcie: add initial controller support

This adds:
- Generic PCIe Controller layer implementing the current PCIe API
- Generic PCIe Controller in ECAM mode driver

The Generic PCIe Controller layer provides:
- Configuration space read/write
- Single-bus endpoint enumeration
- Endpoint I/O, MEM & MEM64 BAR allocation
- Endpoint I/O, MEM & MEM64 BAR lookup & translation for drivers
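
For illustration, a minimal sketch of an endpoint driver consuming this
API (the 00:01.0 location and the 0x1234/0x5678 IDs are placeholders, not
part of this patch); the controller itself is resolved internally through
the zephyr,pcie-controller chosen node:

  #include <errno.h>
  #include <drivers/pcie/pcie.h>

  #define MY_VID 0x1234 /* placeholder vendor ID */
  #define MY_PID 0x5678 /* placeholder device ID */

  static int my_endpoint_init(void)
  {
          pcie_bdf_t bdf = PCIE_BDF(0, 1, 0); /* bus 0 only, see limitations */
          struct pcie_mbar mbar;

          /* Match vendor/device ID in configuration space */
          if (!pcie_probe(bdf, PCIE_ID(MY_VID, MY_PID))) {
                  return -ENODEV;
          }

          /* Fetch the translated (CPU-visible) BAR0 region */
          if (!pcie_get_mbar(bdf, 0, &mbar)) {
                  return -EINVAL;
          }

          /* mbar.phys_addr / mbar.size can now be mapped with device_map() */
          return 0;
  }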

The Generic PCIe Controller in ECAM mode driver provides:
- Parsing of raw DT ranges properties into usable PCIe regions
- Configuration space read/write through the ECAM mapping
- PCIe region allocation & translation

The limitations are:
- No support for PCIe prefetchable regions
- No support for PCIe bus configuration (only bus 0 is supported)
- No support for multiple controllers (no domain-id in BDF)

Support initially targets Root Complexes with Root Complex Integrated
Endpoints, a topology aimed at embedded systems whose PCIe Endpoints are
internal-only and all sit on bus 0.

Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>

drivers/pcie/CMakeLists.txt

@@ -1,6 +1,8 @@
zephyr_library()
zephyr_library_sources(pcie.c)
zephyr_library_sources_ifdef(CONFIG_PCIE_CONTROLLER controller.c)
zephyr_library_sources_ifdef(CONFIG_PCIE_ECAM pcie_ecam.c)
zephyr_library_sources_ifdef(CONFIG_PCIE_MSI msi.c)
zephyr_library_sources_ifdef(CONFIG_PCIE_SHELL shell.c)
zephyr_library_sources_ifdef(CONFIG_PCIE_PTM ptm.c)

drivers/pcie/Kconfig

@@ -14,6 +14,23 @@ module = PCIE
module-str = pcie
source "subsys/logging/Kconfig.template.log_config"
config PCIE_CONTROLLER
        bool "Enable PCIe Controller management"
        help
          Add support for PCIe Controller management when it is not already
          handled by system firmware, as it is on x86 platforms.

if PCIE_CONTROLLER

config PCIE_ECAM
        bool "Enable support for PCIe ECAM Controllers"
        help
          Add support for PCIe Controllers configured with the Enhanced
          Configuration Access Mechanism (ECAM), allowing all outgoing I/O
          and MEM TLPs to be mapped from memory space into any 256 MB region
          of the PCIe configuration space.

endif # PCIE_CONTROLLER

config PCIE_MSI
        bool "Enable support for PCI(e) MSI"
        help
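
To use these options on a target, a typical application configuration
fragment (prj.conf) would be the following, assuming the pre-existing
top-level CONFIG_PCIE switch:

  CONFIG_PCIE=y
  CONFIG_PCIE_CONTROLLER=y
  CONFIG_PCIE_ECAM=y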

drivers/pcie/controller.c

@@ -0,0 +1,217 @@
/*
* Copyright (c) 2021 BayLibre, SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <logging/log.h>
LOG_MODULE_REGISTER(pcie_core, LOG_LEVEL_INF);

#include <kernel.h>
#include <drivers/pcie/pcie.h>
#include <drivers/pcie/controller.h>
#ifdef CONFIG_PCIE_MSI
#include <drivers/pcie/msi.h>
#endif

/* arch agnostic PCIe API implementation */

uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
{
        const struct device *dev;

        dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
        if (!dev) {
                LOG_ERR("Failed to get PCIe root complex");
                return 0xffffffff;
        }

        return pcie_ctrl_conf_read(dev, bdf, reg);
}

void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data)
{
        const struct device *dev;

        dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
        if (!dev) {
                LOG_ERR("Failed to get PCIe root complex");
                return;
        }

        pcie_ctrl_conf_write(dev, bdf, reg, data);
}
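
/*
 * ECAM offset math (sketch): assuming Zephyr's PCIE_BDF() encoding (bus in
 * bits 23:16, device in bits 15:11, function in bits 10:8), shifting the
 * whole BDF left by 4 yields the ECAM layout (bus in bits 27:20, device in
 * 19:15, function in 14:12), and the 32-bit word index supplies the
 * register offset, i.e. byte offset = (bdf << 4) + (reg << 2).
 */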
uint32_t generic_pcie_ctrl_conf_read(mm_reg_t cfg_addr, pcie_bdf_t bdf, unsigned int reg)
{
        volatile uint32_t *bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4));

        if (!cfg_addr) {
                return 0xffffffff;
        }

        return bdf_cfg_mem[reg];
}

void generic_pcie_ctrl_conf_write(mm_reg_t cfg_addr, pcie_bdf_t bdf,
                                  unsigned int reg, uint32_t data)
{
        volatile uint32_t *bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4));

        if (!cfg_addr) {
                return;
        }

        bdf_cfg_mem[reg] = data;
}
static void generic_pcie_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
        /* Not yet supported */
}

static void generic_pcie_ctrl_type0_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
        unsigned int bar, reg, data;
        uintptr_t scratch, bar_bus_addr;
        size_t size, bar_size;

        for (bar = 0, reg = PCIE_CONF_BAR0; reg <= PCIE_CONF_BAR5; reg++, bar++) {
                bool found_mem64 = false;
                bool found_mem = false;

                data = scratch = pcie_conf_read(bdf, reg);

                if (PCIE_CONF_BAR_INVAL_FLAGS(data)) {
                        continue;
                }

                if (PCIE_CONF_BAR_MEM(data)) {
                        found_mem = true;
                        if (PCIE_CONF_BAR_64(data)) {
                                found_mem64 = true;
                                scratch |= ((uint64_t)pcie_conf_read(bdf, reg + 1)) << 32;
                                if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL64) {
                                        continue;
                                }
                        } else {
                                if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL) {
                                        continue;
                                }
                        }
                }
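
                /*
                 * Standard PCI BAR sizing handshake: write all 1s to the
                 * BAR, read the value back (the size-alignment bits are
                 * hard-wired to 0 by the device), then restore the
                 * original contents.
                 */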
                pcie_conf_write(bdf, reg, 0xFFFFFFFF);
                size = pcie_conf_read(bdf, reg);
                pcie_conf_write(bdf, reg, scratch & 0xFFFFFFFF);

                if (found_mem64) {
                        pcie_conf_write(bdf, reg + 1, 0xFFFFFFFF);
                        size |= ((uint64_t)pcie_conf_read(bdf, reg + 1)) << 32;
                        pcie_conf_write(bdf, reg + 1, scratch >> 32);
                }

                if (!PCIE_CONF_BAR_ADDR(size)) {
                        if (found_mem64) {
                                reg++;
                        }
                        continue;
                }

                if (found_mem) {
                        if (found_mem64) {
                                bar_size = (uint64_t)~PCIE_CONF_BAR_ADDR(size) + 1;
                        } else {
                                bar_size = (uint32_t)~PCIE_CONF_BAR_ADDR(size) + 1;
                        }
                } else {
                        bar_size = (uint32_t)~PCIE_CONF_BAR_IO_ADDR(size) + 1;
                }

                if (pcie_ctrl_region_allocate(ctrl_dev, bdf, found_mem,
                                              found_mem64, bar_size, &bar_bus_addr)) {
                        uintptr_t bar_phys_addr;

                        pcie_ctrl_region_xlate(ctrl_dev, bdf, found_mem,
                                               found_mem64, bar_bus_addr, &bar_phys_addr);

                        LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx "
                                "assigned [%s 0x%lx-0x%lx -> 0x%lx-0x%lx]",
                                PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
                                bar, bar_size,
                                found_mem ? (found_mem64 ? "mem64" : "mem") : "io",
                                bar_bus_addr, bar_bus_addr + bar_size - 1,
                                bar_phys_addr, bar_phys_addr + bar_size - 1);

                        pcie_conf_write(bdf, reg, bar_bus_addr & 0xFFFFFFFF);
                        if (found_mem64) {
                                pcie_conf_write(bdf, reg + 1, bar_bus_addr >> 32);
                        }
                } else {
                        LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx Failed memory allocation.",
                                PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
                                bar, bar_size);
                }

                if (found_mem64) {
                        reg++;
                }
        }
}
static void generic_pcie_ctrl_enumerate_type0(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
        /* Setup Type0 BARs */
        generic_pcie_ctrl_type0_enumerate_bars(ctrl_dev, bdf);
}

void generic_pcie_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start)
{
        uint32_t data, class, id;
        unsigned int dev = PCIE_BDF_TO_DEV(bdf_start),
                     func = 0,
                     bus = PCIE_BDF_TO_BUS(bdf_start);

        for (; dev <= PCIE_MAX_DEV; dev++) {
                func = 0;
                for (; func <= PCIE_MAX_FUNC; func++) {
                        pcie_bdf_t bdf = PCIE_BDF(bus, dev, func);
                        bool multifunction_device = false;
                        bool layout_type_1 = false;

                        id = pcie_conf_read(bdf, PCIE_CONF_ID);
                        if (id == PCIE_ID_NONE) {
                                continue;
                        }

                        class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);
                        data = pcie_conf_read(bdf, PCIE_CONF_TYPE);
                        multifunction_device = PCIE_CONF_MULTIFUNCTION(data);
                        layout_type_1 = PCIE_CONF_TYPE_BRIDGE(data);

                        LOG_INF("[%02x:%02x.%x] %04x:%04x class %x subclass %x progif %x "
                                "rev %x Type%x multifunction %s",
                                PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
                                id & 0xffff, id >> 16,
                                PCIE_CONF_CLASSREV_CLASS(class),
                                PCIE_CONF_CLASSREV_SUBCLASS(class),
                                PCIE_CONF_CLASSREV_PROGIF(class),
                                PCIE_CONF_CLASSREV_REV(class),
                                layout_type_1 ? 1 : 0,
                                multifunction_device ? "true" : "false");

                        if (layout_type_1) {
                                generic_pcie_ctrl_enumerate_type1(ctrl_dev, bdf);
                        } else {
                                generic_pcie_ctrl_enumerate_type0(ctrl_dev, bdf);
                        }

                        /* Do not enumerate sub-functions if not a multifunction device */
                        if (PCIE_BDF_TO_FUNC(bdf) == 0 && !multifunction_device) {
                                break;
                        }
                }
        }
}

drivers/pcie/pcie.c

@@ -5,6 +5,9 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include <logging/log.h>
LOG_MODULE_REGISTER(pcie, LOG_LEVEL_ERR);

#include <kernel.h>
#include <stdbool.h>
#include <drivers/pcie/pcie.h>

@@ -13,6 +16,10 @@

#include <drivers/pcie/msi.h>
#endif

#ifdef CONFIG_PCIE_CONTROLLER
#include <drivers/pcie/controller.h>
#endif
/* functions documented in drivers/pcie/pcie.h */
bool pcie_probe(pcie_bdf_t bdf, pcie_id_t id)
@@ -101,18 +108,31 @@ bool pcie_get_mbar(pcie_bdf_t bdf,
                   struct pcie_mbar *mbar)
{
        uint32_t reg = bar_index + PCIE_CONF_BAR0;
#ifdef CONFIG_PCIE_CONTROLLER
        const struct device *dev;
#endif
        uintptr_t phys_addr;
        size_t size;

#ifdef CONFIG_PCIE_CONTROLLER
        dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
        if (!dev) {
                LOG_ERR("Failed to get PCIe root complex");
                return false;
        }
#endif

        if (reg > PCIE_CONF_BAR5) {
                return false;
        }

        phys_addr = pcie_conf_read(bdf, reg);

#ifndef CONFIG_PCIE_CONTROLLER
        if (PCIE_CONF_BAR_IO(phys_addr)) {
                /* Discard I/O bars */
                return false;
        }
#endif

        if (PCIE_CONF_BAR_INVAL_FLAGS(phys_addr)) {
                /* Discard on invalid flags */
@@ -142,13 +162,33 @@ bool pcie_get_mbar(pcie_bdf_t bdf,
                return false;
        }

        if (PCIE_CONF_BAR_IO(phys_addr)) {
                size = PCIE_CONF_BAR_IO_ADDR(size);
                if (size == 0) {
                        /* Discard on invalid size */
                        return false;
                }
        } else {
                size = PCIE_CONF_BAR_ADDR(size);
                if (size == 0) {
                        /* Discard on invalid size */
                        return false;
                }
        }

#ifdef CONFIG_PCIE_CONTROLLER
        /* Translate the bus address to a physical memory address */
        if (!pcie_ctrl_region_xlate(dev, bdf, PCIE_CONF_BAR_MEM(phys_addr),
                                    PCIE_CONF_BAR_64(phys_addr),
                                    PCIE_CONF_BAR_MEM(phys_addr) ?
                                        PCIE_CONF_BAR_ADDR(phys_addr)
                                        : PCIE_CONF_BAR_IO_ADDR(phys_addr),
                                    &mbar->phys_addr)) {
                return false;
        }
#else
        mbar->phys_addr = PCIE_CONF_BAR_ADDR(phys_addr);
#endif /* CONFIG_PCIE_CONTROLLER */

        mbar->size = size & ~(size - 1);

        return true;
@@ -182,6 +222,11 @@ bool pcie_probe_mbar(pcie_bdf_t bdf,
*/
#define IRQ_LIST_INITIALIZED 0
#ifndef CONFIG_MAX_IRQ_LINES
#warning TOFIX for non-x86
#define CONFIG_MAX_IRQ_LINES 0
#endif
static ATOMIC_DEFINE(irq_reserved, CONFIG_MAX_IRQ_LINES);
static unsigned int irq_alloc(void)

drivers/pcie/pcie_ecam.c

@@ -0,0 +1,293 @@
/*
* Copyright (c) 2021 BayLibre, SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <logging/log.h>
LOG_MODULE_REGISTER(pcie_ecam, LOG_LEVEL_ERR);

#include <kernel.h>
#include <device.h>
#include <drivers/pcie/pcie.h>
#include <drivers/pcie/controller.h>

#define DT_DRV_COMPAT pci_host_ecam_generic
/*
* PCIe Controller Regions
*
* TOFIX:
* - handle prefetchable regions
*/
enum pcie_region_type {
        PCIE_REGION_IO = 0,
        PCIE_REGION_MEM,
        PCIE_REGION_MEM64,
        PCIE_REGION_MAX,
};
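
/*
 * Per-controller state: allocation_offset is the cursor of a simple bump
 * allocator, advanced by pcie_ecam_region_allocate_type() as endpoint
 * BARs are placed into each region.
 */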
struct pcie_ecam_data {
        uintptr_t cfg_phys_addr;
        mm_reg_t cfg_addr;
        size_t cfg_size;
        struct {
                uintptr_t phys_start;
                uintptr_t bus_start;
                size_t size;
                size_t allocation_offset;
        } regions[PCIE_REGION_MAX];
};
static int pcie_ecam_init(const struct device *dev)
{
        const struct pcie_ctrl_config *cfg = (const struct pcie_ctrl_config *)dev->config;
        struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;
        int i;

        /*
         * Flags defined in the PCI Bus Binding to IEEE Std 1275-1994:
         * Bit#           33222222 22221111 11111100 00000000
         *                10987654 32109876 54321098 76543210
         *
         * phys.hi  cell: npt000ss bbbbbbbb dddddfff rrrrrrrr
         * phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
         * phys.lo  cell: llllllll llllllll llllllll llllllll
         *
         * where:
         *
         * n  is 0 if the address is relocatable, 1 otherwise
         * p  is 1 if the addressable region is "prefetchable", 0 otherwise
         * t  is 1 if the address is aliased (for non-relocatable I/O),
         *    below 1 MB (for Memory), or below 64 KB (for relocatable I/O)
         * ss is the space code, denoting the address space:
         *    00 denotes Configuration Space
         *    01 denotes I/O Space
         *    10 denotes 32-bit-address Memory Space
         *    11 denotes 64-bit-address Memory Space
         * bbbbbbbb is the 8-bit Bus Number
         * ddddd    is the 5-bit Device Number
         * fff      is the 3-bit Function Number
         * rrrrrrrr is the 8-bit Register Number
         * hh...hh  is a 32-bit unsigned number
         * ll...ll  is a 32-bit unsigned number:
         *    for I/O Space, the 32-bit offset from the start of the region
         *    for 32-bit-address Memory Space, the 32-bit offset from the
         *    start of the region
         *    for 64-bit-address Memory Space, the 64-bit offset from the
         *    start of the region
         *
         * Here we only handle the p, ss, hh and ll fields.
         *
         * TOFIX:
         * - handle prefetchable bit
         */
        for (i = 0; i < cfg->ranges_count; ++i) {
                switch ((cfg->ranges[i].flags >> 24) & 0x03) {
                case 0x01:
                        data->regions[PCIE_REGION_IO].bus_start = cfg->ranges[i].pcie_bus_addr;
                        data->regions[PCIE_REGION_IO].phys_start = cfg->ranges[i].host_map_addr;
                        data->regions[PCIE_REGION_IO].size = cfg->ranges[i].map_length;
                        /* Linux & U-Boot avoid allocating PCI resources from address 0 */
                        if (data->regions[PCIE_REGION_IO].bus_start < 0x1000) {
                                data->regions[PCIE_REGION_IO].allocation_offset = 0x1000;
                        }
                        break;
                case 0x02:
                        data->regions[PCIE_REGION_MEM].bus_start = cfg->ranges[i].pcie_bus_addr;
                        data->regions[PCIE_REGION_MEM].phys_start = cfg->ranges[i].host_map_addr;
                        data->regions[PCIE_REGION_MEM].size = cfg->ranges[i].map_length;
                        /* Linux & U-Boot avoid allocating PCI resources from address 0 */
                        if (data->regions[PCIE_REGION_MEM].bus_start < 0x1000) {
                                data->regions[PCIE_REGION_MEM].allocation_offset = 0x1000;
                        }
                        break;
                case 0x03:
                        data->regions[PCIE_REGION_MEM64].bus_start = cfg->ranges[i].pcie_bus_addr;
                        data->regions[PCIE_REGION_MEM64].phys_start = cfg->ranges[i].host_map_addr;
                        data->regions[PCIE_REGION_MEM64].size = cfg->ranges[i].map_length;
                        /* Linux & U-Boot avoid allocating PCI resources from address 0 */
                        if (data->regions[PCIE_REGION_MEM64].bus_start < 0x1000) {
                                data->regions[PCIE_REGION_MEM64].allocation_offset = 0x1000;
                        }
                        break;
                }
        }
        if (!data->regions[PCIE_REGION_IO].size &&
            !data->regions[PCIE_REGION_MEM].size &&
            !data->regions[PCIE_REGION_MEM64].size) {
                LOG_ERR("No regions defined");
                return -EINVAL;
        }

        /* Get Config address space physical address & size */
        data->cfg_phys_addr = cfg->cfg_addr;
        data->cfg_size = cfg->cfg_size;

        if (data->regions[PCIE_REGION_IO].size) {
                LOG_DBG("IO bus [0x%lx - 0x%lx, size 0x%lx]",
                        data->regions[PCIE_REGION_IO].bus_start,
                        (data->regions[PCIE_REGION_IO].bus_start +
                         data->regions[PCIE_REGION_IO].size - 1),
                        data->regions[PCIE_REGION_IO].size);
                LOG_DBG("IO space [0x%lx - 0x%lx, size 0x%lx]",
                        data->regions[PCIE_REGION_IO].phys_start,
                        (data->regions[PCIE_REGION_IO].phys_start +
                         data->regions[PCIE_REGION_IO].size - 1),
                        data->regions[PCIE_REGION_IO].size);
        }

        if (data->regions[PCIE_REGION_MEM].size) {
                LOG_DBG("MEM bus [0x%lx - 0x%lx, size 0x%lx]",
                        data->regions[PCIE_REGION_MEM].bus_start,
                        (data->regions[PCIE_REGION_MEM].bus_start +
                         data->regions[PCIE_REGION_MEM].size - 1),
                        data->regions[PCIE_REGION_MEM].size);
                LOG_DBG("MEM space [0x%lx - 0x%lx, size 0x%lx]",
                        data->regions[PCIE_REGION_MEM].phys_start,
                        (data->regions[PCIE_REGION_MEM].phys_start +
                         data->regions[PCIE_REGION_MEM].size - 1),
                        data->regions[PCIE_REGION_MEM].size);
        }

        if (data->regions[PCIE_REGION_MEM64].size) {
                LOG_DBG("MEM64 bus [0x%lx - 0x%lx, size 0x%lx]",
                        data->regions[PCIE_REGION_MEM64].bus_start,
                        (data->regions[PCIE_REGION_MEM64].bus_start +
                         data->regions[PCIE_REGION_MEM64].size - 1),
                        data->regions[PCIE_REGION_MEM64].size);
                LOG_DBG("MEM64 space [0x%lx - 0x%lx, size 0x%lx]",
                        data->regions[PCIE_REGION_MEM64].phys_start,
                        (data->regions[PCIE_REGION_MEM64].phys_start +
                         data->regions[PCIE_REGION_MEM64].size - 1),
                        data->regions[PCIE_REGION_MEM64].size);
        }

        /* Map config space for the generic_pcie_ctrl_conf_read/write callbacks */
        device_map(&data->cfg_addr, data->cfg_phys_addr, data->cfg_size, K_MEM_CACHE_NONE);

        LOG_DBG("Config space [0x%lx - 0x%lx, size 0x%lx]",
                data->cfg_phys_addr, (data->cfg_phys_addr + data->cfg_size - 1), data->cfg_size);
        LOG_DBG("Config mapped [0x%lx - 0x%lx, size 0x%lx]",
                data->cfg_addr, (data->cfg_addr + data->cfg_size - 1), data->cfg_size);

        generic_pcie_ctrl_enumerate(dev, PCIE_BDF(0, 0, 0));

        return 0;
}
static uint32_t pcie_ecam_ctrl_conf_read(const struct device *dev, pcie_bdf_t bdf, unsigned int reg)
{
        struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;

        return generic_pcie_ctrl_conf_read(data->cfg_addr, bdf, reg);
}

static void pcie_ecam_ctrl_conf_write(const struct device *dev, pcie_bdf_t bdf, unsigned int reg,
                                      uint32_t reg_data)
{
        struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;

        generic_pcie_ctrl_conf_write(data->cfg_addr, bdf, reg, reg_data);
}
static bool pcie_ecam_region_allocate_type(struct pcie_ecam_data *data, pcie_bdf_t bdf,
                                           size_t bar_size, uintptr_t *bar_bus_addr,
                                           enum pcie_region_type type)
{
        uintptr_t addr;
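
        /*
         * Bump-allocate: align the current cursor up to the BAR size,
         * using align_up(x, a) == ((x - 1) | (a - 1)) + 1, which holds
         * for the power-of-two sizes BARs are required to have.
         */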
        addr = (((data->regions[type].bus_start + data->regions[type].allocation_offset) - 1) |
                ((bar_size) - 1)) + 1;

        if (addr - data->regions[type].bus_start + bar_size > data->regions[type].size) {
                return false;
        }

        *bar_bus_addr = addr;
        data->regions[type].allocation_offset = addr - data->regions[type].bus_start + bar_size;

        return true;
}
static bool pcie_ecam_region_allocate(const struct device *dev, pcie_bdf_t bdf,
                                      bool mem, bool mem64, size_t bar_size,
                                      uintptr_t *bar_bus_addr)
{
        struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;
        enum pcie_region_type type;

        if (mem && !data->regions[PCIE_REGION_MEM64].size &&
            !data->regions[PCIE_REGION_MEM].size) {
                LOG_DBG("bdf %x no mem region defined for allocation", bdf);
                return false;
        }

        if (!mem && !data->regions[PCIE_REGION_IO].size) {
                LOG_DBG("bdf %x no io region defined for allocation", bdf);
                return false;
        }

        /*
         * Allocate from the mem64 region if mem64 is requested, or if it is
         * the only memory region available.
         *
         * TOFIX:
         * - handle allocation from/to mem/mem64 when a region is full
         */
        if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) ||
                    (data->regions[PCIE_REGION_MEM64].size &&
                     !data->regions[PCIE_REGION_MEM].size))) {
                type = PCIE_REGION_MEM64;
        } else if (mem) {
                type = PCIE_REGION_MEM;
        } else {
                type = PCIE_REGION_IO;
        }

        return pcie_ecam_region_allocate_type(data, bdf, bar_size, bar_bus_addr, type);
}
static bool pcie_ecam_region_xlate(const struct device *dev, pcie_bdf_t bdf,
                                   bool mem, bool mem64, uintptr_t bar_bus_addr,
                                   uintptr_t *bar_addr)
{
        struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;
        enum pcie_region_type type;

        /* A zero bus address means the BAR was never allocated */
        if (!bar_bus_addr) {
                return false;
        }
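
        /*
         * Pick the region type with the same logic as
         * pcie_ecam_region_allocate(), so the translation uses the
         * bus/phys base pair the BAR was allocated from.
         */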
        if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) ||
                    (data->regions[PCIE_REGION_MEM64].size &&
                     !data->regions[PCIE_REGION_MEM].size))) {
                type = PCIE_REGION_MEM64;
        } else if (mem) {
                type = PCIE_REGION_MEM;
        } else {
                type = PCIE_REGION_IO;
        }

        *bar_addr = data->regions[type].phys_start + (bar_bus_addr - data->regions[type].bus_start);

        return true;
}
static const struct pcie_ctrl_driver_api pcie_ecam_api = {
        .conf_read = pcie_ecam_ctrl_conf_read,
        .conf_write = pcie_ecam_ctrl_conf_write,
        .region_allocate = pcie_ecam_region_allocate,
        .region_xlate = pcie_ecam_region_xlate,
};

#define PCIE_ECAM_INIT(n) \
        static struct pcie_ecam_data pcie_ecam_data##n; \
        static const struct pcie_ctrl_config pcie_ecam_config##n = { \
                .cfg_addr = DT_INST_REG_ADDR(n), \
                .cfg_size = DT_INST_REG_SIZE(n), \
                .ranges_count = DT_NUM_RANGES(DT_DRV_INST(n)), \
                .ranges = { \
                        DT_FOREACH_RANGE(DT_DRV_INST(n), PCIE_RANGE_FORMAT) \
                }, \
        }; \
        DEVICE_DT_INST_DEFINE(n, &pcie_ecam_init, NULL, \
                              &pcie_ecam_data##n, \
                              &pcie_ecam_config##n, \
                              PRE_KERNEL_1, \
                              CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, \
                              &pcie_ecam_api);

DT_INST_FOREACH_STATUS_OKAY(PCIE_ECAM_INIT)

include/drivers/pcie/pcie.h

@@ -243,6 +243,7 @@ extern uint32_t pcie_get_ext_cap(pcie_bdf_t bdf, uint32_t cap_id);
#define PCIE_CONF_TYPE 3U
#define PCIE_CONF_MULTIFUNCTION(w) (((w) & 0x00800000U) != 0U)
#define PCIE_CONF_TYPE_BRIDGE(w) (((w) & 0x007F0000U) != 0U)
/*
@@ -262,6 +263,7 @@ extern uint32_t pcie_get_ext_cap(pcie_bdf_t bdf, uint32_t cap_id);
#define PCIE_CONF_BAR_MEM(w) (((w) & 0x00000001U) != 0x00000001U)
#define PCIE_CONF_BAR_64(w) (((w) & 0x00000006U) == 0x00000004U)
#define PCIE_CONF_BAR_ADDR(w) ((w) & ~0xfUL)
#define PCIE_CONF_BAR_IO_ADDR(w) ((w) & ~0x3UL)
#define PCIE_CONF_BAR_FLAGS(w) ((w) & 0xfUL)
#define PCIE_CONF_BAR_NONE 0U