path: root/arch/loongarch
author     Sean Christopherson <seanjc@google.com>  2024-10-10 20:24:07 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>      2024-10-25 19:00:49 +0200
commit     0fe133a33e4ce333e6f8795e3ec50d94a17c9c16 (patch)
tree       09b450065c8d4aaaebdad695a7a009576a7315b6 /arch/loongarch
parent     KVM: PPC: Use kvm_faultin_pfn() to handle page faults on Book3s PR (diff)
download   linux-0fe133a33e4ce333e6f8795e3ec50d94a17c9c16.tar.xz
           linux-0fe133a33e4ce333e6f8795e3ec50d94a17c9c16.zip
KVM: LoongArch: Mark "struct page" pfns dirty only in "slow" page fault path
Mark pages/folios dirty only in the "slow" page fault path, i.e. only when
mmu_lock is held and the operation is mmu_notifier-protected, as marking a
page/folio dirty after it has been written back can make some filesystems
unhappy (backing KVM guests with such filesystem files is uncommon, and the
race is minuscule, hence the lack of complaints).  See the link below for
details.

Link: https://lore.kernel.org/all/cover.1683044162.git.lstoakes@gmail.com
Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-66-seanjc@google.com>
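The shape of the fix is easiest to see in the slow path (second hunk below):
the struct-page dirty update moves inside mmu_lock, while KVM's dirty-bitmap
update stays outside it.  The condensed helper below is a hypothetical sketch
of that ordering, not code from the patch; kvm_set_pfn_dirty() and
mark_page_dirty_in_slot() are the real KVM helpers it reorders, and the
prot_bits check on the bitmap update is omitted for brevity.

/*
 * Hypothetical sketch of the ordering this patch enforces in the slow
 * path; sketch_finish_fault() is not a function in the patch.
 */
static void sketch_finish_fault(struct kvm *kvm, struct kvm_memory_slot *slot,
				kvm_pfn_t pfn, gfn_t gfn, bool writeable)
{
	spin_lock(&kvm->mmu_lock);

	/* ... install the PTE via kvm_set_pte() ... */

	/*
	 * Dirty the struct page while mmu_lock is held, i.e. while the
	 * mapping is still mmu_notifier-protected.  Dirtying the page
	 * after the lock is dropped can race with the page being
	 * unmapped and written back.
	 */
	if (writeable)
		kvm_set_pfn_dirty(pfn);

	spin_unlock(&kvm->mmu_lock);

	/*
	 * The dirty bitmap is KVM-internal tracking (e.g. for dirty
	 * logging), not page state, so updating it outside mmu_lock
	 * is fine.
	 */
	mark_page_dirty_in_slot(kvm, slot, gfn);
}

In the fast path (first hunk below), kvm_set_pfn_dirty() is dropped entirely
and only the mark_page_dirty() bitmap update remains outside the lock.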
Diffstat (limited to 'arch/loongarch')
-rw-r--r--  arch/loongarch/kvm/mmu.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 28681dfb4b85..cc2a5f289b14 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -608,13 +608,13 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 		if (kvm_pte_young(changed))
 			kvm_set_pfn_accessed(pfn);
 
-		if (kvm_pte_dirty(changed)) {
-			mark_page_dirty(kvm, gfn);
-			kvm_set_pfn_dirty(pfn);
-		}
 		if (page)
 			put_page(page);
 	}
+
+	if (kvm_pte_dirty(changed))
+		mark_page_dirty(kvm, gfn);
+
 	return ret;
 out:
 	spin_unlock(&kvm->mmu_lock);
@@ -915,12 +915,14 @@ retry:
 	else
 		++kvm->stat.pages;
 	kvm_set_pte(ptep, new_pte);
+
+	if (writeable)
+		kvm_set_pfn_dirty(pfn);
+
 	spin_unlock(&kvm->mmu_lock);
 
-	if (prot_bits & _PAGE_DIRTY) {
+	if (prot_bits & _PAGE_DIRTY)
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
-		kvm_set_pfn_dirty(pfn);
-	}
 
 	kvm_release_pfn_clean(pfn);
 out: