author     Oliver Upton <oliver.upton@linux.dev>    2024-12-17 18:55:32 +0100
committer  Oliver Upton <oliver.upton@linux.dev>    2024-12-18 22:22:25 +0100
commit     adf8623b3f51e9c7eb18a7bb0381093f31053e38
tree       1cbdf8523f14701dfbae64e6e09410df7f43c6db /arch
parent     KVM: arm64: Add unified helper for reprogramming counters by mask
KVM: arm64: Use KVM_REQ_RELOAD_PMU to handle PMCR_EL0.E change
Nested virt introduces yet another set of 'global' knobs for controlling
event counters that are reserved for EL2 (i.e. >= HPMN). Get ready to
share some plumbing with the NV controls by offloading counter
reprogramming to KVM_REQ_RELOAD_PMU.
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241217175532.3658134-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
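The change described in the message above replaces eager reprogramming in the PMCR_EL0 trap handler with a deferred reload: the write handler only notes that the global enable bit flipped and queues KVM_REQ_RELOAD_PMU, and the counters are actually reprogrammed once, the next time the vCPU's pending requests are serviced before guest entry. The stand-alone C model below is a sketch of that pattern, not kernel code; every name in it (vcpu_model, reload_requested, handle_pmcr_write, before_guest_entry) is illustrative, and a single bool stands in for KVM's per-vCPU request bitmap.

/*
 * Stand-alone model (not kernel code) of the deferred-reprogramming pattern:
 * a trapped PMCR_EL0 write only records that the global enable bit changed
 * and raises a request; the request is consumed once, right before the next
 * "guest entry", where every counter is reprogrammed from the saved enable
 * mask.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMCR_E		(1u << 0)	/* global counter enable, as in the ARMv8 PMU */
#define NR_COUNTERS	4		/* the real PMU has up to 31 counters + the cycle counter */

struct vcpu_model {
	uint64_t pmcr;			/* emulated PMCR_EL0 */
	uint64_t cnten;			/* emulated PMCNTENSET_EL0 */
	bool reload_requested;		/* stands in for KVM_REQ_RELOAD_PMU */
};

/* Trap handler for a guest write to PMCR_EL0. */
static void handle_pmcr_write(struct vcpu_model *v, uint64_t val)
{
	/* XOR isolates the changed bits; only an E-bit toggle needs a reload. */
	if ((v->pmcr ^ val) & PMCR_E)
		v->reload_requested = true;

	v->pmcr = val;
}

/* Reload handler: reprogram every counter from the current enable state. */
static void reload_pmu(struct vcpu_model *v)
{
	bool global = v->pmcr & PMCR_E;

	for (int i = 0; i < NR_COUNTERS; i++) {
		bool on = global && (v->cnten & (1ull << i));
		printf("counter %d -> %s\n", i, on ? "running" : "stopped");
	}
}

/* Called once per guest entry, mirroring a request-processing hook. */
static void before_guest_entry(struct vcpu_model *v)
{
	if (v->reload_requested) {
		v->reload_requested = false;
		reload_pmu(v);
	}
}

int main(void)
{
	struct vcpu_model v = { .cnten = 0x3 };

	handle_pmcr_write(&v, PMCR_E);	/* E: 0 -> 1, queues one reload */
	handle_pmcr_write(&v, PMCR_E);	/* E unchanged, queues nothing */
	before_guest_entry(&v);		/* request consumed, counters reprogrammed once */
	return 0;
}

The payoff is the same as in the patch: PMCR writes that do not flip E queue no extra work, and the reprogramming runs exactly once per pending request, in one place (kvm_vcpu_reload_pmu()/kvm_pmu_reprogram_counter_mask() in the real code).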
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kvm/pmu-emul.c  17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6b3ec956a6e2..c6423782a8aa 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -606,17 +606,13 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
+	/* Request a reload of the PMU to enable/disable affected counters */
+	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_reprogram_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	} else {
-		kvm_pmu_reprogram_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	}
-
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
@@ -626,7 +622,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
-	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -890,11 +885,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
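For context, the request queued by the new code is consumed on the vCPU request path before the next guest entry, where kvm_vcpu_reload_pmu() (updated above) reprograms all implemented counters via kvm_pmu_reprogram_counter_mask(). The fragment below is a rough, non-verbatim sketch of that consumer side, assuming the usual KVM request handling in arch/arm64/kvm/arm.c; it is not a quote of the actual function.

/* Sketch only; not a verbatim copy of check_vcpu_requests(). */
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		/* ... other requests elided ... */

		/* PMCR_EL0.E toggled (or other PMU state changed): reload once here. */
		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_vcpu_reload_pmu(vcpu);
	}
}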