Date: Tue, 23 Sep 2025 17:09:38 GMT
From: Andrew Turner <andrew@FreeBSD.org>
To: src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject: git: fbe076b2c837 - main - arm64/vmm: Use FEAT_ECV_POFF to support a timer
Message-ID: <202509231709.58NH9c9N077401@gitrepo.freebsd.org>
The branch main has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=fbe076b2c837f396f96d4725a43745e741557df1

commit fbe076b2c837f396f96d4725a43745e741557df1
Author:     Andrew Turner <andrew@FreeBSD.org>
AuthorDate: 2025-09-22 17:09:54 +0000
Commit:     Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2025-09-23 17:08:37 +0000

    arm64/vmm: Use FEAT_ECV_POFF to support a timer

    Support guest access to the physical timer when FEAT_ECV_POFF is
    supported. In this case we can set an offset for the physical timer.
    We can reuse the virtual timer support to also support the physical
    timer, with a few more registers needing to be handled when switching
    to a guest.

    As it is not clear how this will affect performance when the guest
    doesn't use it, hide enabling it behind a sysctl.

    It is expected this will be useful when Nested Virtualisation is
    supported, as guests are expected to use the physical timer registers.

    Sponsored by:	Arm Ltd
    Differential Revision:	https://reviews.freebsd.org/D51821
---
 sys/arm64/vmm/arm64.h     |  1 +
 sys/arm64/vmm/io/vtimer.c | 74 +++++++++++++++++++++++++++++++++++++----------
 sys/arm64/vmm/vmm_arm64.c |  5 ++++
 sys/arm64/vmm/vmm_hyp.c   | 54 +++++++++++++++++++++++++++++-----
 4 files changed, 110 insertions(+), 24 deletions(-)

diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
index 0bd5a933bee1..82c4481b8692 100644
--- a/sys/arm64/vmm/arm64.h
+++ b/sys/arm64/vmm/arm64.h
@@ -127,6 +127,7 @@ struct hyp {
         uint64_t        el2_addr;       /* The address of this in el2 space */
         uint64_t        feats;          /* Which features are enabled */
 #define HYP_FEAT_HCX            (0x1ul << 0)
+#define HYP_FEAT_ECV_POFF       (0x1ul << 1)
         bool            vgic_attached;
         struct vgic_v3  *vgic;
         struct hypctx   *ctx[];
diff --git a/sys/arm64/vmm/io/vtimer.c b/sys/arm64/vmm/io/vtimer.c
index ddc9e6e840a5..da0f0d96c431 100644
--- a/sys/arm64/vmm/io/vtimer.c
+++ b/sys/arm64/vmm/io/vtimer.c
@@ -36,6 +36,7 @@
 #include <sys/module.h>
 #include <sys/mutex.h>
 #include <sys/rman.h>
+#include <sys/sysctl.h>
 #include <sys/time.h>
 #include <sys/timeet.h>
 #include <sys/timetc.h>
@@ -59,6 +60,14 @@ static uint32_t tmr_frq;
 
 #define timer_condition_met(ctl)        ((ctl) & CNTP_CTL_ISTATUS)
 
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, vtimer, CTLFLAG_RW, NULL, NULL);
+
+static bool allow_ecv_phys = false;
+SYSCTL_BOOL(_hw_vmm_vtimer, OID_AUTO, allow_ecv_phys, CTLFLAG_RW,
+    &allow_ecv_phys, 0,
+    "Enable hardware access to the physical timer if FEAT_ECV_POFF is supported");
+
 static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);
 
 static int
@@ -126,7 +135,12 @@
 void
 vtimer_vminit(struct hyp *hyp)
 {
         uint64_t now;
+        bool ecv_poff;
+
+        ecv_poff = false;
+        if (allow_ecv_phys && (hyp->feats & HYP_FEAT_ECV_POFF) != 0)
+                ecv_poff = true;
 
         /*
          * Configure the Counter-timer Hypervisor Control Register for the VM.
@@ -151,23 +165,41 @@ vtimer_vminit(struct hyp *hyp)
                  * TODO: Don't trap when FEAT_ECV is present
                  */
                 hyp->vtimer.cnthctl_el2 =
-                    CNTHCTL_E2H_EL0PCTEN_TRAP |
                     CNTHCTL_E2H_EL0VCTEN_NOTRAP |
-                    CNTHCTL_E2H_EL0VTEN_NOTRAP |
-                    CNTHCTL_E2H_EL0PTEN_TRAP |
-                    CNTHCTL_E2H_EL1PCTEN_TRAP |
-                    CNTHCTL_E2H_EL1PTEN_TRAP;
+                    CNTHCTL_E2H_EL0VTEN_NOTRAP;
+                if (ecv_poff) {
+                        hyp->vtimer.cnthctl_el2 |=
+                            CNTHCTL_E2H_EL0PCTEN_NOTRAP |
+                            CNTHCTL_E2H_EL0PTEN_NOTRAP |
+                            CNTHCTL_E2H_EL1PCTEN_NOTRAP |
+                            CNTHCTL_E2H_EL1PTEN_NOTRAP;
+                } else {
+                        hyp->vtimer.cnthctl_el2 |=
+                            CNTHCTL_E2H_EL0PCTEN_TRAP |
+                            CNTHCTL_E2H_EL0PTEN_TRAP |
+                            CNTHCTL_E2H_EL1PCTEN_TRAP |
+                            CNTHCTL_E2H_EL1PTEN_TRAP;
+                }
         } else {
                 /*
                  * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
                  * from EL1
                  * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
                  */
-                hyp->vtimer.cnthctl_el2 =
-                    CNTHCTL_EL1PCTEN_TRAP |
-                    CNTHCTL_EL1PCEN_TRAP;
+                if (ecv_poff) {
+                        hyp->vtimer.cnthctl_el2 =
+                            CNTHCTL_EL1PCTEN_NOTRAP |
+                            CNTHCTL_EL1PCEN_NOTRAP;
+                } else {
+                        hyp->vtimer.cnthctl_el2 =
+                            CNTHCTL_EL1PCTEN_TRAP |
+                            CNTHCTL_EL1PCEN_TRAP;
+                }
         }
 
+        if (ecv_poff)
+                hyp->vtimer.cnthctl_el2 |= CNTHCTL_ECV_EN;
+
         now = READ_SPECIALREG(cntpct_el0);
         hyp->vtimer.cntvoff_el2 = now;
 
@@ -233,15 +265,10 @@ vtimer_cleanup(void)
 {
 }
 
-void
-vtimer_sync_hwstate(struct hypctx *hypctx)
+static void
+vtime_sync_timer(struct hypctx *hypctx, struct vtimer_timer *timer,
+    uint64_t cntpct_el0)
 {
-        struct vtimer_timer *timer;
-        uint64_t cntpct_el0;
-
-        timer = &hypctx->vtimer_cpu.virt_timer;
-        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
-            hypctx->hyp->vtimer.cntvoff_el2;
         if (!timer_enabled(timer->cntx_ctl_el0)) {
                 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                     timer->irqid, false);
@@ -255,6 +282,21 @@ vtimer_sync_hwstate(struct hypctx *hypctx)
         }
 }
 
+void
+vtimer_sync_hwstate(struct hypctx *hypctx)
+{
+        uint64_t cntpct_el0;
+
+        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
+            hypctx->hyp->vtimer.cntvoff_el2;
+        vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.virt_timer, cntpct_el0);
+        /* If FEAT_ECV_POFF is in use then we need to sync the physical timer */
+        if ((hypctx->hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0) {
+                vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.phys_timer,
+                    cntpct_el0);
+        }
+}
+
 static void
 vtimer_inject_irq_callout_phys(void *context)
 {
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index fa13fc76677a..618f4afaf8ee 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -523,6 +523,11 @@ vmmops_init(struct vm *vm, pmap_t pmap)
         hyp->vm = vm;
         hyp->vgic_attached = false;
 
+        if (get_kernel_reg(ID_AA64MMFR0_EL1, &idreg)) {
+                if (ID_AA64MMFR0_ECV_VAL(idreg) >= ID_AA64MMFR0_ECV_POFF)
+                        hyp->feats |= HYP_FEAT_ECV_POFF;
+        }
+
         if (get_kernel_reg(ID_AA64MMFR1_EL1, &idreg)) {
                 if (ID_AA64MMFR1_HCX_VAL(idreg) >= ID_AA64MMFR1_HCX_IMPL)
                         hyp->feats |= HYP_FEAT_HCX;
diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index 6bbf0d7eb730..345535318f6e 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -42,11 +42,11 @@ struct hypctx;
 uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
 
 static void
-vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
+vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
+    bool ecv_poff)
 {
         uint64_t dfr0;
 
-        /* Store the guest VFP registers */
         if (guest) {
                 /* Store the timer registers */
                 hypctx->vtimer_cpu.cntkctl_el1 =
@@ -55,7 +55,20 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
                     READ_SPECIALREG(EL0_REG(CNTV_CVAL));
                 hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
                     READ_SPECIALREG(EL0_REG(CNTV_CTL));
-
+        }
+        if (guest_or_nonvhe(guest) && ecv_poff) {
+                /*
+                 * If we have ECV then the guest could modify these registers.
+                 * If VHE is enabled then the kernel will see a different view
+                 * of the registers, so doesn't need to handle them.
+                 */
+                hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
+                    READ_SPECIALREG(EL0_REG(CNTP_CVAL));
+                hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
+                    READ_SPECIALREG(EL0_REG(CNTP_CTL));
+        }
+
+        if (guest) {
                 /* Store the GICv3 registers */
                 hypctx->vgic_v3_regs.ich_eisr_el2 =
                     READ_SPECIALREG(ich_eisr_el2);
@@ -262,7 +275,8 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
 }
 
 static void
-vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
+vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
+    bool ecv_poff)
 {
         uint64_t dfr0;
 
@@ -440,6 +454,29 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
                 WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
                 WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);
 
+                if (ecv_poff) {
+                        /*
+                         * Load the same offset as the virtual timer
+                         * to keep in sync.
+                         */
+                        WRITE_SPECIALREG(CNTPOFF_EL2_REG,
+                            hyp->vtimer.cntvoff_el2);
+                        isb();
+                }
+        }
+        if (guest_or_nonvhe(guest) && ecv_poff) {
+                /*
+                 * If we have ECV then the guest could modify these registers.
+                 * If VHE is enabled then the kernel will see a different view
+                 * of the registers, so doesn't need to handle them.
+                 */
+                WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
+                    hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
+                WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
+                    hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
+        }
+
+        if (guest) {
                 /* Load the GICv3 registers */
                 WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
                 WRITE_SPECIALREG(ich_vmcr_el2,
@@ -497,9 +534,10 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
 #endif
         uint64_t ret;
         uint64_t s1e1r, hpfar_el2;
-        bool hpfar_valid;
+        bool ecv_poff, hpfar_valid;
 
-        vmm_hyp_reg_store(&host_hypctx, NULL, false);
+        ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
+        vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
 #ifndef VMM_VHE
         if ((hyp->feats & HYP_FEAT_HCX) != 0)
                 hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
@@ -513,7 +551,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
         ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
         ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);
 
-        vmm_hyp_reg_restore(hypctx, hyp, true);
+        vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);
 
         /* Load the common hypervisor registers */
         WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);
@@ -529,7 +567,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
 
         /* Store the exit info */
         hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
-        vmm_hyp_reg_store(hypctx, hyp, true);
+        vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);
 
         hpfar_valid = true;
         if (ret == EXCP_TYPE_EL1_SYNC) {
@@ -579,7 +617,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
                 }
         }
 
-        vmm_hyp_reg_restore(&host_hypctx, NULL, false);
+        vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);
 
 #ifndef VMM_VHE
         if ((hyp->feats & HYP_FEAT_HCX) != 0)
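A note for anyone trying this out: the new knob defaults to off and is writable at
runtime, so it can be enabled with "sysctl hw.vmm.vtimer.allow_ecv_phys=1" before a
VM is created (vtimer_vminit() samples the flag per VM). The fragment below is only
an illustrative sketch of the counter arithmetic the change relies on; it is not
code from the tree, the helper name is invented, and it assumes the architected
FEAT_ECV behaviour where setting CNTHCTL_EL2.ECV makes EL1/EL0 reads of CNTPCT_EL0
return the count minus CNTPOFF_EL2.

#include <stdint.h>

/*
 * Illustrative sketch only: the guest-visible counter value when
 * FEAT_ECV_POFF is in use and CNTPOFF_EL2 has been programmed with the
 * same value as CNTVOFF_EL2, as vmm_hyp_reg_restore() does above.
 */
static inline uint64_t
guest_counter_read(uint64_t host_cntpct, uint64_t offset)
{
        /*
         * CNTPCT_EL0 reads return (host count - CNTPOFF_EL2) and
         * CNTVCT_EL0 reads return (host count - CNTVOFF_EL2), so using
         * the same offset keeps the two guest views consistent.
         */
        return (host_cntpct - offset);
}

Keeping the physical offset equal to the virtual one is what lets the existing
virtual timer code (vtime_sync_timer() above) be reused for the guest's physical
timer, with only the extra CNTP_CTL/CNTP_CVAL state to save and restore around
guest entry and exit.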