arm64/vmm: Move nVHE-only code to the new file

There are some functions that are only needed in non-VHE mode. These
are used to handle hypervisor calls from the kernel, and to manage the
page tables in EL2. As these won't be used by the VHE code we can move
them to the non-VHE specific files.

Sponsored by:	Arm Ltd
Differential Revision:	https://reviews.freebsd.org/D46073
This commit is contained in:
Andrew Turner 2024-08-19 13:43:31 +01:00
parent 3d61bcf1eb
commit 4b6c9f84de
4 changed files with 173 additions and 162 deletions

View file

@ -39,8 +39,6 @@
struct hypctx;
uint64_t vmm_hyp_enter(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t, uint64_t, uint64_t);
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
static void
@ -665,75 +663,3 @@ VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
WRITE_SPECIALREG(vttbr_el2, host_vttbr);
isb();
}
/*
 * Clean and invalidate the data cache over the virtual address range
 * [start, start + len), one cache line at a time.  Always returns 0
 * (the return value becomes the hypercall result in vmm_hyp_enter()).
 */
static int
vmm_dc_civac(uint64_t start, uint64_t len)
{
	size_t line_size, end;
	uint64_t ctr;

	ctr = READ_SPECIALREG(ctr_el0);
	/*
	 * CTR_EL0.DminLine is the log2 of the smallest D-cache line
	 * size in 4-byte words, hence the sizeof(int) multiplier.
	 */
	line_size = sizeof(int) << CTR_DLINE_SIZE(ctr);
	end = start + len;
	/* Make prior stores visible before the maintenance operations */
	dsb(ishst);
	/* Clean and Invalidate the D-cache */
	for (; start < end; start += line_size)
		__asm __volatile("dc civac, %0" :: "r" (start) : "memory");
	/* Wait for the cache maintenance to complete */
	dsb(ish);
	return (0);
}
/*
 * Invalidate EL2 TLB entries on behalf of the kernel.  'type' selects
 * the scope:
 *   HYP_EL2_TLBI_ALL - invalidate all EL2 entries (also used for any
 *                      unrecognised type, via the default label)
 *   HYP_EL2_TLBI_VA  - invalidate by VA over [start, start + len),
 *                      stepping by TLBI_VA_L3_INCR (level 3 page size)
 * Always returns 0.
 */
static int
vmm_el2_tlbi(uint64_t type, uint64_t start, uint64_t len)
{
	uint64_t end, r;

	/* Make prior stores visible before the TLB maintenance */
	dsb(ishst);
	switch (type) {
	default:
	case HYP_EL2_TLBI_ALL:
		__asm __volatile("tlbi alle2" ::: "memory");
		break;
	case HYP_EL2_TLBI_VA:
		/* Convert the byte range to the tlbi VA encoding */
		end = TLBI_VA(start + len);
		start = TLBI_VA(start);
		for (r = start; r < end; r += TLBI_VA_L3_INCR) {
			__asm __volatile("tlbi vae2is, %0" :: "r"(r));
		}
		break;
	}
	/* Wait for the invalidation to complete */
	dsb(ish);
	return (0);
}
/*
 * Entry point for hypervisor calls made from the EL1 kernel.  'handle'
 * selects the operation (a HYP_* constant) and x1-x7 carry the
 * operation-specific arguments; unused arguments are ignored.  Returns
 * the operation's result, or 0 for operations without one (including
 * unknown handles and HYP_CLEANUP, which never reaches this function).
 */
uint64_t
vmm_hyp_enter(uint64_t handle, uint64_t x1, uint64_t x2, uint64_t x3,
    uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7)
{
	switch (handle) {
	case HYP_ENTER_GUEST:
		return (VMM_HYP_FUNC(enter_guest)((struct hyp *)x1,
		    (struct hypctx *)x2));
	case HYP_READ_REGISTER:
		return (VMM_HYP_FUNC(read_reg)(x1));
	case HYP_CLEAN_S2_TLBI:
		/*
		 * Keep the argument list outside the macro argument,
		 * i.e. VMM_HYP_FUNC(name)(), to match the other
		 * VMM_HYP_FUNC call sites; the expansion is identical.
		 */
		VMM_HYP_FUNC(clean_s2_tlbi)();
		return (0);
	case HYP_DC_CIVAC:
		return (vmm_dc_civac(x1, x2));
	case HYP_EL2_TLBI:
		return (vmm_el2_tlbi(x1, x2, x3));
	case HYP_S2_TLBI_RANGE:
		VMM_HYP_FUNC(s2_tlbi_range)(x1, x2, x3, x4);
		return (0);
	case HYP_S2_TLBI_ALL:
		VMM_HYP_FUNC(s2_tlbi_all)(x1);
		return (0);
	case HYP_CLEANUP:	/* Handled in vmm_hyp_exception.S */
	default:
		break;
	}

	return (0);
}

View file

@ -145,29 +145,6 @@
b handle_\name
.endm
	.section ".vmm_vectors","ax"
	/* VBAR_EL2 requires 2^11 (2 KiB) alignment */
	.align 11
/*
 * Bootstrap exception vector table.  Every slot is invalid (vempty)
 * except the Synchronous 64-bit EL1 entry, which dispatches to
 * handle_hyp_init through the hyp_init vector.
 */
hyp_init_vectors:
	vempty		/* Synchronous EL2t */
	vempty		/* IRQ EL2t */
	vempty		/* FIQ EL2t */
	vempty		/* Error EL2t */

	vempty		/* Synchronous EL2h */
	vempty		/* IRQ EL2h */
	vempty		/* FIQ EL2h */
	vempty		/* Error EL2h */

	vector hyp_init	/* Synchronous 64-bit EL1 */
	vempty		/* IRQ 64-bit EL1 */
	vempty		/* FIQ 64-bit EL1 */
	vempty		/* Error 64-bit EL1 */

	vempty		/* Synchronous 32-bit EL1 */
	vempty		/* IRQ 32-bit EL1 */
	vempty		/* FIQ 32-bit EL1 */
	vempty		/* Error 32-bit EL1 */
.text
.align 11
hyp_vectors:
@ -191,50 +168,6 @@ hyp_vectors:
vempty /* FIQ 32-bit EL1 */
vempty /* Error 32-bit EL1 */
/*
 * Initialize the hypervisor mode with a new exception vector table, translation
 * table and stack.
 *
 * Entered through the Synchronous 64-bit EL1 slot of hyp_init_vectors,
 * so it runs at EL2 and ERETs back to the EL1 caller on completion.
 *
 * Expecting:
 * x0 - translation tables physical address
 * x1 - stack top virtual address
 * x2 - TCR_EL2 value
 * x3 - SCTLR_EL2 value
 * x4 - VTCR_EL2 value
 */
LENTRY(handle_hyp_init)
	/* Install the new exception vectors */
	adrp	x6, hyp_vectors
	add	x6, x6, :lo12:hyp_vectors
	msr	vbar_el2, x6
	/* Set the stack top address */
	mov	sp, x1
	/* Use the host VTTBR_EL2 to tell the host and the guests apart */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9
	/* Load the base address for the translation tables */
	msr	ttbr0_el2, x0
	/* Invalidate the TLB before enabling the new translation regime */
	dsb	ish
	tlbi	alle2
	dsb	ishst
	isb
	/* Use the same memory attributes as EL1 */
	mrs	x9, mair_el1
	msr	mair_el2, x9
	/* Configure address translation */
	msr	tcr_el2, x2
	isb
	/* Set the system control register for EL2 */
	msr	sctlr_el2, x3
	/* Set the Stage 2 translation control register */
	msr	vtcr_el2, x4
	/* Return success */
	mov	x0, #0
	/* MMU is up and running */
	ERET
LEND(handle_hyp_init)
.macro do_world_switch_to_host
save_guest_registers
restore_host_registers
@ -364,24 +297,3 @@ ENTRY(VMM_HYP_FUNC(do_call_guest))
/* Enter guest */
ERET
END(VMM_HYP_FUNC(do_call_guest))
/*
 * Usage:
 * void vmm_cleanup(uint64_t handle, void *hyp_stub_vectors)
 *
 * Tears down the EL2 state set up by handle_hyp_init: reinstalls the
 * stub exception vectors and disables the EL2 MMU, then returns to
 * the caller with ERET.
 *
 * Expecting:
 * x1 - physical address of hyp_stub_vectors
 */
LENTRY(vmm_cleanup)
	/* Restore the stub vectors */
	msr	vbar_el2, x1

	/* Disable the MMU */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, #SCTLR_EL2_M
	msr	sctlr_el2, x2
	isb

	ERET
LEND(vmm_cleanup)

View file

@ -1,8 +1,12 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Andrew Turner
* Copyright (c) 2024 Arm Ltd
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -29,3 +33,82 @@
#define VMM_HYP_FUNC(func) vmm_nvhe_ ## func
#include "vmm_hyp.c"
uint64_t vmm_hyp_enter(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t, uint64_t, uint64_t);
/*
* Handlers for EL2 address space. Only needed by non-VHE code as in VHE the
* kernel is in EL2 so pmap will manage the address space.
*/
/*
 * Clean and invalidate the data cache over the virtual address range
 * [start, start + len), one cache line at a time.  Always returns 0
 * (the return value becomes the hypercall result in vmm_hyp_enter()).
 */
static int
vmm_dc_civac(uint64_t start, uint64_t len)
{
	size_t line_size, end;
	uint64_t ctr;

	ctr = READ_SPECIALREG(ctr_el0);
	/*
	 * CTR_EL0.DminLine is the log2 of the smallest D-cache line
	 * size in 4-byte words, hence the sizeof(int) multiplier.
	 */
	line_size = sizeof(int) << CTR_DLINE_SIZE(ctr);
	end = start + len;
	/* Make prior stores visible before the maintenance operations */
	dsb(ishst);
	/* Clean and Invalidate the D-cache */
	for (; start < end; start += line_size)
		__asm __volatile("dc civac, %0" :: "r" (start) : "memory");
	/* Wait for the cache maintenance to complete */
	dsb(ish);
	return (0);
}
/*
 * Invalidate EL2 TLB entries on behalf of the kernel.  'type' selects
 * the scope:
 *   HYP_EL2_TLBI_ALL - invalidate all EL2 entries (also used for any
 *                      unrecognised type, via the default label)
 *   HYP_EL2_TLBI_VA  - invalidate by VA over [start, start + len),
 *                      stepping by TLBI_VA_L3_INCR (level 3 page size)
 * Always returns 0.
 */
static int
vmm_el2_tlbi(uint64_t type, uint64_t start, uint64_t len)
{
	uint64_t end, r;

	/* Make prior stores visible before the TLB maintenance */
	dsb(ishst);
	switch (type) {
	default:
	case HYP_EL2_TLBI_ALL:
		__asm __volatile("tlbi alle2" ::: "memory");
		break;
	case HYP_EL2_TLBI_VA:
		/* Convert the byte range to the tlbi VA encoding */
		end = TLBI_VA(start + len);
		start = TLBI_VA(start);
		for (r = start; r < end; r += TLBI_VA_L3_INCR) {
			__asm __volatile("tlbi vae2is, %0" :: "r"(r));
		}
		break;
	}
	/* Wait for the invalidation to complete */
	dsb(ish);
	return (0);
}
/*
 * Entry point for hypervisor calls made from the EL1 kernel.  'handle'
 * selects the operation (a HYP_* constant) and x1-x7 carry the
 * operation-specific arguments; unused arguments are ignored.  Returns
 * the operation's result, or 0 for operations without one (including
 * unknown handles and HYP_CLEANUP, which never reaches this function).
 */
uint64_t
vmm_hyp_enter(uint64_t handle, uint64_t x1, uint64_t x2, uint64_t x3,
    uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7)
{
	switch (handle) {
	case HYP_ENTER_GUEST:
		return (VMM_HYP_FUNC(enter_guest)((struct hyp *)x1,
		    (struct hypctx *)x2));
	case HYP_READ_REGISTER:
		return (VMM_HYP_FUNC(read_reg)(x1));
	case HYP_CLEAN_S2_TLBI:
		/*
		 * Keep the argument list outside the macro argument,
		 * i.e. VMM_HYP_FUNC(name)(), to match the other
		 * VMM_HYP_FUNC call sites; the expansion is identical.
		 */
		VMM_HYP_FUNC(clean_s2_tlbi)();
		return (0);
	case HYP_DC_CIVAC:
		return (vmm_dc_civac(x1, x2));
	case HYP_EL2_TLBI:
		return (vmm_el2_tlbi(x1, x2, x3));
	case HYP_S2_TLBI_RANGE:
		VMM_HYP_FUNC(s2_tlbi_range)(x1, x2, x3, x4);
		return (0);
	case HYP_S2_TLBI_ALL:
		VMM_HYP_FUNC(s2_tlbi_all)(x1);
		return (0);
	case HYP_CLEANUP:	/* Handled in vmm_hyp_exception.S */
	default:
		break;
	}

	return (0);
}

View file

@ -28,3 +28,93 @@
#define VMM_HYP_FUNC(func) vmm_nvhe_ ## func
#include "vmm_hyp_exception.S"
	.section ".vmm_vectors","ax"
	/* VBAR_EL2 requires 2^11 (2 KiB) alignment */
	.align 11
/*
 * Bootstrap exception vector table.  Every slot is invalid (vempty)
 * except the Synchronous 64-bit EL1 entry, which dispatches to
 * handle_hyp_init through the hyp_init vector.
 */
hyp_init_vectors:
	vempty		/* Synchronous EL2t */
	vempty		/* IRQ EL2t */
	vempty		/* FIQ EL2t */
	vempty		/* Error EL2t */

	vempty		/* Synchronous EL2h */
	vempty		/* IRQ EL2h */
	vempty		/* FIQ EL2h */
	vempty		/* Error EL2h */

	vector hyp_init	/* Synchronous 64-bit EL1 */
	vempty		/* IRQ 64-bit EL1 */
	vempty		/* FIQ 64-bit EL1 */
	vempty		/* Error 64-bit EL1 */

	vempty		/* Synchronous 32-bit EL1 */
	vempty		/* IRQ 32-bit EL1 */
	vempty		/* FIQ 32-bit EL1 */
	vempty		/* Error 32-bit EL1 */
.text
/*
 * Initialize the hypervisor mode with a new exception vector table, translation
 * table and stack.
 *
 * Entered through the Synchronous 64-bit EL1 slot of hyp_init_vectors,
 * so it runs at EL2 and ERETs back to the EL1 caller on completion.
 *
 * Expecting:
 * x0 - translation tables physical address
 * x1 - stack top virtual address
 * x2 - TCR_EL2 value
 * x3 - SCTLR_EL2 value
 * x4 - VTCR_EL2 value
 */
LENTRY(handle_hyp_init)
	/* Install the new exception vectors */
	adrp	x6, hyp_vectors
	add	x6, x6, :lo12:hyp_vectors
	msr	vbar_el2, x6
	/* Set the stack top address */
	mov	sp, x1
	/* Use the host VTTBR_EL2 to tell the host and the guests apart */
	mov	x9, #VTTBR_HOST
	msr	vttbr_el2, x9
	/* Load the base address for the translation tables */
	msr	ttbr0_el2, x0
	/* Invalidate the TLB before enabling the new translation regime */
	dsb	ish
	tlbi	alle2
	dsb	ishst
	isb
	/* Use the same memory attributes as EL1 */
	mrs	x9, mair_el1
	msr	mair_el2, x9
	/* Configure address translation */
	msr	tcr_el2, x2
	isb
	/* Set the system control register for EL2 */
	msr	sctlr_el2, x3
	/* Set the Stage 2 translation control register */
	msr	vtcr_el2, x4
	/* Return success */
	mov	x0, #0
	/* MMU is up and running */
	ERET
LEND(handle_hyp_init)
/*
 * Usage:
 * void vmm_cleanup(uint64_t handle, void *hyp_stub_vectors)
 *
 * Tears down the EL2 state set up by handle_hyp_init: reinstalls the
 * stub exception vectors and disables the EL2 MMU, then returns to
 * the caller with ERET.
 *
 * Expecting:
 * x1 - physical address of hyp_stub_vectors
 */
LENTRY(vmm_cleanup)
	/* Restore the stub vectors */
	msr	vbar_el2, x1

	/* Disable the MMU */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, #SCTLR_EL2_M
	msr	sctlr_el2, x2
	isb

	ERET
LEND(vmm_cleanup)