drivers: xen: gnttab: do Xen node mapping inside driver
Move the memory mapping of the Xen node into the Grant Table driver's
system init function. With the mapping done there, the xen-xen node
entries in the 'mmu_regions' arrays are no longer needed, so they are
removed from all affected SoCs: Rcar Gen3/Gen4 and XenVM.

At least 16M of virtual address space is needed to map the Xen node's
memory, so the kernel virtual memory size is increased to 32M, which
should be enough for basic use-cases plus the 16M Xen node region.

Unfortunately, the move also requires more XLAT tables. The previous
code was more efficient in XLAT table usage because it mapped the grant
tables through a higher-level table entry using 2M blocks; the new code
maps every 4K page individually, which consumes more XLAT tables.

The number of grant frames is increased as well, to keep the stage 1
and stage 2 memory mappings in sync: previously only one page was
mapped at stage 2, so later access to the unmapped part of the region
could cause MMU translation faults.

The stage 2 mapping (adding to the physmap) is performed before the
stage 1 mapping, because memory may be accessed right after the stage 1
mapping, and an access that is not yet mapped at stage 2 faults during
translation.

Note: the Xen Grant Table driver doesn't use the Zephyr Device Model.

Authored-by: Mykola Kvach <mykola_kvach@epam.com>
Co-authored-by: Oleksii Moisieiev <oleksii_moisieiev@epam.com>
Signed-off-by: Mykola Kvach <mykola_kvach@epam.com>

commit 62fd5ab3e1 (parent ef041c6b98)

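As the commit message explains, the driver now maps the 16M Xen node region with 4K pages instead of the 2M blocks the removed 'mmu_regions' entry relied on, which is why CONFIG_MAX_XLAT_TABLES grows in the board configs below. A minimal arithmetic sketch of that cost, assuming the usual ARMv8-A 4K granule where one last-level translation table holds 512 entries and therefore spans 2M (the 16M figure comes from the commit message; the rest is illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	const unsigned int region      = 16u * 1024 * 1024; /* Xen node region, 16M   */
	const unsigned int page_4k     = 4u * 1024;
	const unsigned int block_2m    = 2u * 1024 * 1024;
	const unsigned int tbl_entries = 512;                /* entries per XLAT table */

	/* 4K pages: each 2M chunk of the region needs its own last-level table. */
	unsigned int pages     = region / page_4k;           /* 4096 mappings  */
	unsigned int l3_tables = pages / tbl_entries;        /* 8 extra tables */

	/* 2M blocks: mapped one level up, no last-level tables needed. */
	unsigned int blocks    = region / block_2m;          /* 8 mappings, 0 extra tables */

	printf("4K pages:  %u entries, %u extra last-level tables\n", pages, l3_tables);
	printf("2M blocks: %u entries, 0 extra last-level tables\n", blocks);
	return 0;
}

Eight extra last-level tables for the Xen region alone, on top of everything else Zephyr maps, is consistent with the CONFIG_MAX_XLAT_TABLES bump from 10 to 24 in the hunks below.
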
@@ -4,7 +4,7 @@ CONFIG_BOARD_XENVM=y
 # Enable UART driver
 CONFIG_SERIAL=y
 
-CONFIG_MAX_XLAT_TABLES=10
+CONFIG_MAX_XLAT_TABLES=24
 CONFIG_HEAP_MEM_POOL_SIZE=16384
 
 # Enable console

@@ -4,7 +4,7 @@ CONFIG_BOARD_XENVM=y
 # Enable UART driver
 CONFIG_SERIAL=y
 
-CONFIG_MAX_XLAT_TABLES=10
+CONFIG_MAX_XLAT_TABLES=24
 
 # Enable console
 CONFIG_CONSOLE=y

@@ -28,17 +28,24 @@
 #include <zephyr/init.h>
 #include <zephyr/kernel.h>
 #include <zephyr/logging/log.h>
+#include <zephyr/sys/device_mmio.h>
 
 LOG_MODULE_REGISTER(xen_gnttab);
 
 /* Timeout for grant table ops retrying */
 #define GOP_RETRY_DELAY 200
 
+#define GNTTAB_SIZE DT_REG_SIZE_BY_IDX(DT_INST(0, xen_xen), 0)
+BUILD_ASSERT(!(GNTTAB_SIZE % XEN_PAGE_SIZE), "Size of gnttab have to be aligned on XEN_PAGE_SIZE");
+
 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
-#define NR_GRANT_FRAMES 1
+#define NR_GRANT_FRAMES (GNTTAB_SIZE / XEN_PAGE_SIZE)
 #define NR_GRANT_ENTRIES \
 	(NR_GRANT_FRAMES * XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
 
+BUILD_ASSERT(GNTTAB_SIZE <= CONFIG_KERNEL_VM_SIZE);
+DEVICE_MMIO_TOPLEVEL_STATIC(grant_tables, DT_INST(0, xen_xen));
+
 static struct gnttab {
 	struct k_sem sem;
 	grant_entry_v1_t *table;

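For a concrete feel of what NR_GRANT_FRAMES and NR_GRANT_ENTRIES now evaluate to, here is a small standalone sketch, assuming the 16M xen,xen reg region mentioned in the commit message, 4K Xen pages, and the 8-byte grant_entry_v1_t layout (flags, domid, frame); the real driver takes GNTTAB_SIZE from the device tree instead:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the device-tree and Xen ABI values used by the driver. */
#define GNTTAB_SIZE_ASSUMED   (16u * 1024 * 1024)  /* DT_REG_SIZE of the xen,xen node */
#define XEN_PAGE_SIZE_ASSUMED 4096u

struct grant_entry_v1_sketch {          /* mirrors grant_entry_v1_t: 8 bytes */
	uint16_t flags;
	uint16_t domid;
	uint32_t frame;
};

int main(void)
{
	unsigned int frames  = GNTTAB_SIZE_ASSUMED / XEN_PAGE_SIZE_ASSUMED;
	unsigned int entries = frames * (XEN_PAGE_SIZE_ASSUMED /
					 sizeof(struct grant_entry_v1_sketch));

	printf("NR_GRANT_FRAMES  = %u\n", frames);   /* 4096    */
	printf("NR_GRANT_ENTRIES = %u\n", entries);  /* 2097152 */
	return 0;
}

The retained comment still applies: NR_GRANT_FRAMES must not exceed the number of grant frames the Xen side is configured to allow.
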
@@ -307,15 +314,12 @@ static int gnttab_init(void)
 		put_free_entry(gref);
 	}
 
-	gnttab.table = (grant_entry_v1_t *)
-		DT_REG_ADDR_BY_IDX(DT_INST(0, xen_xen), 0);
-
 	for (i = 0; i < NR_GRANT_FRAMES; i++) {
 		xatp.domid = DOMID_SELF;
 		xatp.size = 0;
 		xatp.space = XENMAPSPACE_grant_table;
 		xatp.idx = i;
-		xatp.gpfn = xen_virt_to_gfn(gnttab.table) + i;
+		xatp.gpfn = xen_virt_to_gfn(Z_TOPLEVEL_ROM_NAME(grant_tables).phys_addr) + i;
 		rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 		__ASSERT(!rc, "add_to_physmap failed; status = %d\n", rc);
 	}

@@ -327,6 +331,9 @@ static int gnttab_init(void)
 	__ASSERT((!rc) && (!setup.status), "Table setup failed; status = %s\n",
 		gnttabop_error(setup.status));
 
+	DEVICE_MMIO_TOPLEVEL_MAP(grant_tables, K_MEM_CACHE_WB | K_MEM_PERM_RW);
+	gnttab.table = (grant_entry_v1_t *)DEVICE_MMIO_TOPLEVEL_GET(grant_tables);
+
 	LOG_DBG("%s: grant table mapped\n", __func__);
 
 	return 0;

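Read together, the two hunks above leave gnttab_init() with the ordering the commit message describes: stage 2 first (add each frame to the physmap at the guest-physical address reserved by the xen,xen node), then the pre-existing grant-table setup call whose __ASSERT is visible above, and only then the stage 1 mapping into the kernel's virtual address space. A condensed sketch of that flow, using only names visible in the diff and assuming the surrounding driver context (declarations, the setup call and error paths are elided):

/* Condensed sketch of the new gnttab_init() mapping order; identifiers are
 * taken from the diff, the setup-table step is only summarized, and locals
 * from the unchanged parts of the function are assumed to exist.
 */
static void gnttab_map_sketch(void)
{
	struct xen_add_to_physmap xatp;
	int rc, i;

	/* Stage 2: ask Xen to back the guest-physical range reserved by the
	 * xen,xen node with the grant table frames, one 4K frame at a time.
	 */
	for (i = 0; i < NR_GRANT_FRAMES; i++) {
		xatp.domid = DOMID_SELF;
		xatp.size = 0;
		xatp.space = XENMAPSPACE_grant_table;
		xatp.idx = i;
		xatp.gpfn = xen_virt_to_gfn(Z_TOPLEVEL_ROM_NAME(grant_tables).phys_addr) + i;
		rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
		__ASSERT(!rc, "add_to_physmap failed; status = %d\n", rc);
	}

	/* ... unchanged grant-table setup call and its __ASSERT go here ... */

	/* Stage 1: now map those frames into the kernel's virtual address
	 * space and publish the pointer used by the rest of the driver.
	 */
	DEVICE_MMIO_TOPLEVEL_MAP(grant_tables, K_MEM_CACHE_WB | K_MEM_PERM_RW);
	gnttab.table = (grant_entry_v1_t *)DEVICE_MMIO_TOPLEVEL_GET(grant_tables);
}
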
@@ -14,4 +14,9 @@ config SYS_CLOCK_HW_CYCLES_PER_SEC
 	int
 	default 8320000
 
+# We need at least 16M of virtual address space to map memory of Xen node
+# 32M should be enough for basic use-cases
+config KERNEL_VM_SIZE
+	default 0x2000000
+
 endif

@@ -18,11 +18,6 @@ static const struct arm_mmu_region mmu_regions[] = {
 			      DT_REG_ADDR_BY_IDX(DT_INST(0, arm_gic), 1),
 			      DT_REG_SIZE_BY_IDX(DT_INST(0, arm_gic), 1),
 			      MT_DEVICE_nGnRnE | MT_P_RW_U_NA | MT_NS),
-
-	MMU_REGION_FLAT_ENTRY("HYPERVISOR",
-			      DT_REG_ADDR_BY_IDX(DT_INST(0, xen_xen), 0),
-			      DT_REG_SIZE_BY_IDX(DT_INST(0, xen_xen), 0),
-			      MT_NORMAL | MT_P_RW_U_NA | MT_NS),
 };
 
 const struct arm_mmu_config mmu_config = {