drivers: virtualization: Map ivshmem-v2 sections individually
Recent changes to the arm64 MMU code mean that R/O memory can no longer be mapped as R/W. Mapping memory as R/W now triggers a cache invalidation instruction (DC IVAC) that requires write permission; otherwise a fault is generated. Modify ivshmem-v2 to map each R/O and R/W section individually.

Signed-off-by: Grant Ramsay <gramsay@enphaseenergy.com>
This commit is contained in:
parent
d36e085ecc
commit
64cc0764ee
|
@ -66,4 +66,10 @@ config IVSHMEM_V2
|
||||||
Enable ivshmem-v2 support.
|
Enable ivshmem-v2 support.
|
||||||
ivshmem-v2 is primarily used for IPC in the Jailhouse hypervisor.
|
ivshmem-v2 is primarily used for IPC in the Jailhouse hypervisor.
|
||||||
|
|
||||||
|
config IVSHMEM_V2_MAX_PEERS
|
||||||
|
int "Maximum number of ivshmem-v2 peers"
|
||||||
|
depends on IVSHMEM_V2
|
||||||
|
default 2
|
||||||
|
range 2 65536
|
||||||
|
|
||||||
endif # VIRTUALIZATION
|
endif # VIRTUALIZATION
|
||||||
|
|
|
@ -198,7 +198,7 @@ static bool ivshmem_configure(const struct device *dev)
|
||||||
(volatile struct ivshmem_v2_reg *)DEVICE_MMIO_GET(dev);
|
(volatile struct ivshmem_v2_reg *)DEVICE_MMIO_GET(dev);
|
||||||
|
|
||||||
data->max_peers = regs->max_peers;
|
data->max_peers = regs->max_peers;
|
||||||
if (!IN_RANGE(data->max_peers, 2, 0x10000)) {
|
if (!IN_RANGE(data->max_peers, 2, CONFIG_IVSHMEM_V2_MAX_PEERS)) {
|
||||||
LOG_ERR("Invalid max peers %u", data->max_peers);
|
LOG_ERR("Invalid max peers %u", data->max_peers);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -211,26 +211,49 @@ static bool ivshmem_configure(const struct device *dev)
|
||||||
shmem_phys_addr = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
|
shmem_phys_addr = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* State table R/O */
|
||||||
cap_pos = vendor_cap + IVSHMEM_CFG_STATE_TAB_SZ / 4;
|
cap_pos = vendor_cap + IVSHMEM_CFG_STATE_TAB_SZ / 4;
|
||||||
size_t state_table_size = pcie_conf_read(data->pcie->bdf, cap_pos);
|
size_t state_table_size = pcie_conf_read(data->pcie->bdf, cap_pos);
|
||||||
|
|
||||||
LOG_INF("State table size 0x%zX", state_table_size);
|
LOG_INF("State table size 0x%zX", state_table_size);
|
||||||
if (state_table_size < sizeof(uint32_t) * data->max_peers) {
|
if (state_table_size < sizeof(uint32_t) * data->max_peers) {
|
||||||
LOG_ERR("Invalid state table size %zu", state_table_size);
|
LOG_ERR("Invalid state table size %zu", state_table_size);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
z_phys_map((uint8_t **)&data->state_table_shmem,
|
||||||
|
shmem_phys_addr, state_table_size,
|
||||||
|
K_MEM_CACHE_WB | K_MEM_PERM_USER);
|
||||||
|
|
||||||
|
/* R/W section (optional) */
|
||||||
cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4;
|
cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4;
|
||||||
data->rw_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
|
data->rw_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
|
||||||
data->rw_section_offset = state_table_size;
|
size_t rw_section_offset = state_table_size;
|
||||||
LOG_INF("RW section size 0x%zX", data->rw_section_size);
|
LOG_INF("RW section size 0x%zX", data->rw_section_size);
|
||||||
|
if (data->rw_section_size > 0) {
|
||||||
|
z_phys_map((uint8_t **)&data->rw_section_shmem,
|
||||||
|
shmem_phys_addr + rw_section_offset, data->rw_section_size,
|
||||||
|
K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Output sections */
|
||||||
cap_pos = vendor_cap + IVSHMEM_CFG_OUTPUT_SECTION_SZ / 4;
|
cap_pos = vendor_cap + IVSHMEM_CFG_OUTPUT_SECTION_SZ / 4;
|
||||||
data->output_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
|
data->output_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
|
||||||
data->output_section_offset = data->rw_section_offset + data->rw_section_size;
|
size_t output_section_offset = rw_section_offset + data->rw_section_size;
|
||||||
LOG_INF("Output section size 0x%zX", data->output_section_size);
|
LOG_INF("Output section size 0x%zX", data->output_section_size);
|
||||||
|
for (uint32_t i = 0; i < data->max_peers; i++) {
|
||||||
|
uintptr_t phys_addr = shmem_phys_addr +
|
||||||
|
output_section_offset +
|
||||||
|
(data->output_section_size * i);
|
||||||
|
uint32_t flags = K_MEM_CACHE_WB | K_MEM_PERM_USER;
|
||||||
|
|
||||||
data->size = data->output_section_offset +
|
/* Only your own output section is R/W */
|
||||||
|
if (i == regs->id) {
|
||||||
|
flags |= K_MEM_PERM_RW;
|
||||||
|
}
|
||||||
|
z_phys_map((uint8_t **)&data->output_section_shmem[i],
|
||||||
|
phys_addr, data->output_section_size, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
data->size = output_section_offset +
|
||||||
data->output_section_size * data->max_peers;
|
data->output_section_size * data->max_peers;
|
||||||
|
|
||||||
/* Ensure one-shot ISR mode is disabled */
|
/* Ensure one-shot ISR mode is disabled */
|
||||||
|
@ -249,11 +272,11 @@ static bool ivshmem_configure(const struct device *dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
data->size = mbar_shmem.size;
|
data->size = mbar_shmem.size;
|
||||||
}
|
|
||||||
|
|
||||||
z_phys_map((uint8_t **)&data->shmem,
|
z_phys_map((uint8_t **)&data->shmem,
|
||||||
shmem_phys_addr, data->size,
|
shmem_phys_addr, data->size,
|
||||||
K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
|
K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
|
||||||
|
}
|
||||||
|
|
||||||
if (msi_x_bar_present) {
|
if (msi_x_bar_present) {
|
||||||
if (!ivshmem_configure_msi_x_interrupts(dev)) {
|
if (!ivshmem_configure_msi_x_interrupts(dev)) {
|
||||||
|
@ -284,6 +307,13 @@ static size_t ivshmem_api_get_mem(const struct device *dev,
|
||||||
{
|
{
|
||||||
struct ivshmem *data = dev->data;
|
struct ivshmem *data = dev->data;
|
||||||
|
|
||||||
|
#ifdef CONFIG_IVSHMEM_V2
|
||||||
|
if (data->ivshmem_v2) {
|
||||||
|
*memmap = 0;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
*memmap = data->shmem;
|
*memmap = data->shmem;
|
||||||
|
|
||||||
return data->size;
|
return data->size;
|
||||||
|
@ -389,11 +419,11 @@ static size_t ivshmem_api_get_rw_mem_section(const struct device *dev,
|
||||||
struct ivshmem *data = dev->data;
|
struct ivshmem *data = dev->data;
|
||||||
|
|
||||||
if (!data->ivshmem_v2) {
|
if (!data->ivshmem_v2) {
|
||||||
memmap = NULL;
|
*memmap = 0;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
*memmap = data->shmem + data->rw_section_offset;
|
*memmap = data->rw_section_shmem;
|
||||||
|
|
||||||
return data->rw_section_size;
|
return data->rw_section_size;
|
||||||
}
|
}
|
||||||
|
@ -405,12 +435,11 @@ static size_t ivshmem_api_get_output_mem_section(const struct device *dev,
|
||||||
struct ivshmem *data = dev->data;
|
struct ivshmem *data = dev->data;
|
||||||
|
|
||||||
if (!data->ivshmem_v2 || peer_id >= data->max_peers) {
|
if (!data->ivshmem_v2 || peer_id >= data->max_peers) {
|
||||||
memmap = NULL;
|
*memmap = 0;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
*memmap = data->shmem + data->output_section_offset +
|
*memmap = data->output_section_shmem[peer_id];
|
||||||
data->output_section_size * peer_id;
|
|
||||||
|
|
||||||
return data->output_section_size;
|
return data->output_section_size;
|
||||||
}
|
}
|
||||||
|
@ -425,7 +454,7 @@ static uint32_t ivshmem_api_get_state(const struct device *dev,
|
||||||
}
|
}
|
||||||
|
|
||||||
const volatile uint32_t *state_table =
|
const volatile uint32_t *state_table =
|
||||||
(const volatile uint32_t *)data->shmem;
|
(const volatile uint32_t *)data->state_table_shmem;
|
||||||
|
|
||||||
return state_table[peer_id];
|
return state_table[peer_id];
|
||||||
}
|
}
|
||||||
|
|
|
@ -57,9 +57,10 @@ struct ivshmem {
|
||||||
bool ivshmem_v2;
|
bool ivshmem_v2;
|
||||||
uint32_t max_peers;
|
uint32_t max_peers;
|
||||||
size_t rw_section_size;
|
size_t rw_section_size;
|
||||||
size_t rw_section_offset;
|
|
||||||
size_t output_section_size;
|
size_t output_section_size;
|
||||||
size_t output_section_offset;
|
uintptr_t state_table_shmem;
|
||||||
|
uintptr_t rw_section_shmem;
|
||||||
|
uintptr_t output_section_shmem[CONFIG_IVSHMEM_V2_MAX_PEERS];
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -84,6 +84,12 @@ __subsystem struct ivshmem_driver_api {
|
||||||
/**
|
/**
|
||||||
* @brief Get the inter-VM shared memory
|
* @brief Get the inter-VM shared memory
|
||||||
*
|
*
|
||||||
|
* Note: This API is not supported for ivshmem-v2, as
|
||||||
|
* the R/W and R/O areas may not be mapped contiguously.
|
||||||
|
* For ivshmem-v2, use the ivshmem_get_rw_mem_section,
|
||||||
|
* ivshmem_get_output_mem_section and ivshmem_get_state
|
||||||
|
* APIs to access the shared memory.
|
||||||
|
*
|
||||||
* @param dev Pointer to the device structure for the driver instance
|
* @param dev Pointer to the device structure for the driver instance
|
||||||
* @param memmap A pointer to fill in with the memory address
|
* @param memmap A pointer to fill in with the memory address
|
||||||
*
|
*
|
||||||
|
|
Loading…
Reference in a new issue