author      Sean Christopherson <seanjc@google.com>    2024-10-10 20:23:39 +0200
committer   Paolo Bonzini <pbonzini@redhat.com>        2024-10-25 18:59:08 +0200
commit      447c375c9104b5c2881a5806f26f967492cab867 (patch)
tree        dc830b5e6756d7f0d860f8be36c4c29d13a9ecf7 /arch/x86/kvm/mmu/mmu.c
parent      KVM: x86: Don't fault-in APIC access page during initial allocation (diff)
download    linux-447c375c9104b5c2881a5806f26f967492cab867.tar.xz
            linux-447c375c9104b5c2881a5806f26f967492cab867.zip
KVM: x86/mmu: Add "mmu" prefix fault-in helpers to free up generic names
Prefix x86's faultin_pfn helpers with "mmu" so that the mmu-less names can
be used by common KVM for similar APIs.
No functional change intended.
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-38-seanjc@google.com>
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu/mmu.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 688202ac50c2..7dcd34628f49 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4354,8 +4354,8 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
 	return max_level;
 }
 
-static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
-				   struct kvm_page_fault *fault)
+static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
+				       struct kvm_page_fault *fault)
 {
 	int max_order, r;
 
@@ -4378,10 +4378,11 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
 	return RET_PF_CONTINUE;
 }
 
-static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
+				 struct kvm_page_fault *fault)
 {
 	if (fault->is_private)
-		return kvm_faultin_pfn_private(vcpu, fault);
+		return kvm_mmu_faultin_pfn_private(vcpu, fault);
 
 	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
 					  fault->write, &fault->map_writable);
@@ -4416,8 +4417,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	return RET_PF_CONTINUE;
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-			   unsigned int access)
+static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
+			       struct kvm_page_fault *fault, unsigned int access)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	int ret;
@@ -4500,7 +4501,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
 		return RET_PF_RETRY;
 
-	ret = __kvm_faultin_pfn(vcpu, fault);
+	ret = __kvm_mmu_faultin_pfn(vcpu, fault);
 	if (ret != RET_PF_CONTINUE)
 		return ret;
 
@@ -4577,7 +4578,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (r)
 		return r;
 
-	r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
+	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
 	if (r != RET_PF_CONTINUE)
 		return r;
 
@@ -4668,7 +4669,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
 	if (r)
 		return r;
 
-	r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
+	r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
 	if (r != RET_PF_CONTINUE)
 		return r;
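
For readers unfamiliar with the layering, the sketch below models the helper chain as it looks after this rename. It is a minimal, self-contained illustration (stub types, stub return codes, empty bodies), not the real KVM implementation; only the function names and parameter lists mirror the diff above.

/*
 * Illustrative sketch only: a toy model of the post-rename call chain in
 * arch/x86/kvm/mmu/mmu.c. All types, constants, and bodies here are
 * stand-ins, not the real KVM definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu { int id; };
struct kvm_page_fault { bool is_private; };

enum { RET_PF_CONTINUE, RET_PF_RETRY };

#define ACC_ALL 0x7	/* stand-in for KVM's "all access" mask */

/* Innermost helper: fault-in for private (guest_memfd-backed) memory. */
static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
				       struct kvm_page_fault *fault)
{
	(void)vcpu;
	(void)fault;
	return RET_PF_CONTINUE;
}

/* Middle layer: dispatches between the private and shared paths. */
static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
				 struct kvm_page_fault *fault)
{
	if (fault->is_private)
		return kvm_mmu_faultin_pfn_private(vcpu, fault);
	/* Shared path (pfn lookup) elided in this sketch. */
	return RET_PF_CONTINUE;
}

/* Outer entry point, called by the page fault handlers. */
static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
			       struct kvm_page_fault *fault,
			       unsigned int access)
{
	(void)access;
	return __kvm_mmu_faultin_pfn(vcpu, fault);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };
	struct kvm_page_fault fault = { .is_private = false };

	printf("ret = %d\n", kvm_mmu_faultin_pfn(&vcpu, &fault, ACC_ALL));
	return 0;
}

With the x86 helpers carrying the "mmu" prefix, common KVM code is free to introduce generic kvm_faultin_pfn()-style APIs without a naming collision, which is the stated purpose of this patch.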