author     Linus Torvalds <torvalds@linux-foundation.org>  2025-01-12 21:04:53 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-01-12 21:04:53 +0100
commit     be548645527a131a097fdc884b7fca40c8b86231
tree       7ce4d60c8a57b53a3a128375cfd086a67ff2c5ee /arch
parent     Merge tag 'perf_urgent_for_v6.13_rc7' of git://git.kernel.org/pub/scm/linux/k...
parent     Merge tag 'kvm-s390-master-6.13-1' of https://git.kernel.org/pub/scm/linux/ke...
download   linux-be548645527a131a097fdc884b7fca40c8b86231.tar.xz
           linux-be548645527a131a097fdc884b7fca40c8b86231.zip
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "The largest part here is for KVM/PPC, where a NULL pointer dereference
  was introduced in the 6.13 merge window and is now fixed.

  There's some "holiday-induced lateness", as the s390 submaintainer put
  it, but otherwise things look fine.

  s390:

   - fix a latent bug when the kernel is compiled in debug mode

   - two small UCONTROL fixes and their selftests

  arm64:

   - always check page state in hyp_ack_unshare()

   - align set_id_regs selftest with the fact that the ASIDBITS field
     is RO

   - various vPMU fixes for bugs that only affect nested virt

  PPC e500:

   - Fix a mostly impossible (but just wrong) case where IRQs were
     never re-enabled

   - Observe host permissions instead of mapping readonly host pages as
     guest-writable. This fixes a NULL-pointer dereference in 6.13

   - Replace brittle VMA-based attempts at building huge shadow TLB
     entries with PTE lookups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: e500: perform hugepage check after looking up the PFN
  KVM: e500: map readonly host pages for read
  KVM: e500: track host-writability of pages
  KVM: e500: use shadow TLB entry as witness for writability
  KVM: e500: always restore irqs
  KVM: s390: selftests: Add has device attr check to uc_attr_mem_limit selftest
  KVM: s390: selftests: Add ucontrol gis routing test
  KVM: s390: Reject KVM_SET_GSI_ROUTING on ucontrol VMs
  KVM: s390: selftests: Add ucontrol flic attr selftests
  KVM: s390: Reject setting flic pfault attributes on ucontrol VMs
  KVM: s390: vsie: fix virtual/physical address in unpin_scb()
  KVM: arm64: Only apply PMCR_EL0.P to the guest range of counters
  KVM: arm64: nv: Reload PMU events upon MDCR_EL2.HPME change
  KVM: arm64: Use KVM_REQ_RELOAD_PMU to handle PMCR_EL0.E change
  KVM: arm64: Add unified helper for reprogramming counters by mask
  KVM: arm64: Always check the state from hyp_ack_unshare()
  KVM: arm64: Fix set_id_regs selftest for ASIDBITS becoming unwritable
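
The e500 permission problem described in the message above comes down to one rule: never give the guest write access to a page the host itself only maps read-only. A minimal standalone sketch of that rule follows; the bit values and helper name are illustrative, not the kernel's own definitions.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the e500 MAS3 write-permission bits. */
#define MAS3_SW 0x4u    /* supervisor write */
#define MAS3_UW 0x8u    /* user write */

/*
 * Build the permission bits of a shadow TLB entry: start from what the
 * guest asked for, but drop write permission whenever the host mapping
 * of the backing page is read-only, so the shadow entry never grants
 * more access than the host does.
 */
static uint32_t shadow_mas3(uint32_t guest_mas3, bool host_writable)
{
	if (!host_writable)
		guest_mas3 &= ~(MAS3_UW | MAS3_SW);
	return guest_mas3;
}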
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c |   3
-rw-r--r--  arch/arm64/kvm/pmu-emul.c             |  89
-rw-r--r--  arch/arm64/kvm/sys_regs.c             |  32
-rw-r--r--  arch/powerpc/kvm/e500.h               |   2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c      | 199
-rw-r--r--  arch/s390/kvm/interrupt.c             |   6
-rw-r--r--  arch/s390/kvm/vsie.c                  |   2
7 files changed, 152 insertions(+), 181 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index caba3e4bd09e..e75374d682f4 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -783,9 +783,6 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
return -EBUSY;
- if (__hyp_ack_skip_pgtable_check(tx))
- return 0;
-
return __hyp_check_page_state_range(addr, size,
PKVM_PAGE_SHARED_BORROWED);
}
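
The hunk above removes a shortcut so that the hypervisor always verifies the tracked state of every page before acknowledging an unshare. A rough standalone sketch of that check, using a hypothetical page-state array in place of pKVM's stage-2 page tables:

#include <errno.h>
#include <stddef.h>

/* Hypothetical stand-in for pKVM's per-page ownership state. */
enum page_state { PAGE_OWNED, PAGE_SHARED_OWNED, PAGE_SHARED_BORROWED };

/*
 * Acknowledge an unshare only if every page in the range is still marked
 * SHARED_BORROWED.  The fix removes a shortcut that skipped this walk for
 * some transitions, so an unexpected state is now always caught.
 */
static int ack_unshare(const enum page_state *state, size_t nr_pages)
{
	size_t i;

	for (i = 0; i < nr_pages; i++)
		if (state[i] != PAGE_SHARED_BORROWED)
			return -EPERM;
	return 0;
}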
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 456102bc0b55..6c5950b9ceac 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
@@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
{
- int i;
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
- if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
+ if (!pmc->perf_event) {
+ kvm_pmu_create_perf_event(pmc);
return;
+ }
- for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
- struct kvm_pmc *pmc;
-
- if (!(val & BIT(i)))
- continue;
-
- pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+ perf_event_enable(pmc->perf_event);
+ if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+ kvm_debug("fail to enable perf event\n");
+}
- if (!pmc->perf_event) {
- kvm_pmu_create_perf_event(pmc);
- } else {
- perf_event_enable(pmc->perf_event);
- if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
- kvm_debug("fail to enable perf event\n");
- }
- }
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event)
+ perf_event_disable(pmc->perf_event);
}
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
int i;
@@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
return;
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
- struct kvm_pmc *pmc;
+ struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
if (!(val & BIT(i)))
continue;
- pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
- if (pmc->perf_event)
- perf_event_disable(pmc->perf_event);
+ if (kvm_pmu_counter_is_enabled(pmc))
+ kvm_pmc_enable_perf_event(pmc);
+ else
+ kvm_pmc_disable_perf_event(pmc);
}
+
+ kvm_vcpu_pmu_restore_guest(vcpu);
}
/*
@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP;
+ /* Request a reload of the PMU to enable/disable affected counters */
+ if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
/* The reset bits don't indicate any state, and shouldn't be saved. */
__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
- if (val & ARMV8_PMU_PMCR_E) {
- kvm_pmu_enable_counter_mask(vcpu,
- __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
- } else {
- kvm_pmu_disable_counter_mask(vcpu,
- __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
- }
-
if (val & ARMV8_PMU_PMCR_C)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
- mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+ /*
+ * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+ * to the 'guest' range of counters and never the 'hyp' range.
+ */
+ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+ ~kvm_pmu_hyp_counter_mask(vcpu) &
+ ~BIT(ARMV8_PMU_CYCLE_IDX);
+
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
}
- kvm_vcpu_pmu_restore_guest(vcpu);
}
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
- kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+ kvm_pmu_reprogram_counter_mask(vcpu, mask);
}
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
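
The pmu-emul.c changes collapse the separate enable/disable paths into one "reprogram by mask" helper: for each counter touched, the backing perf event is converged onto whatever the current register state implies. A simplified, self-contained sketch of that shape, with hypothetical structures standing in for KVM's:

#include <stdbool.h>
#include <stdint.h>

#define NR_COUNTERS 32

/* Hypothetical per-counter state standing in for struct kvm_pmc. */
struct counter {
	bool globally_enabled;    /* PMCR_EL0.E (or MDCR_EL2.HPME for the hyp range) */
	bool set_in_pmcnten;      /* corresponding bit in PMCNTENSET_EL0 */
	bool backing_event_on;    /* whether the host perf event should run */
};

/*
 * One helper replaces the old enable/disable pair: walk only the counters
 * named in the mask and converge each backing event on the state implied
 * by the current register values, whichever direction that is.
 */
static void reprogram_counters(struct counter *c, uint32_t touched_mask)
{
	for (unsigned int i = 0; i < NR_COUNTERS; i++) {
		if (!(touched_mask & (1u << i)))
			continue;
		c[i].backing_event_on = c[i].globally_enabled && c[i].set_in_pmcnten;
	}
}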
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e2a5c2918d9e..634ff18a59a1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
mask = kvm_pmu_accessible_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
- if (r->Op2 & 0x1) {
+ if (r->Op2 & 0x1)
/* accessing PMCNTENSET_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
- kvm_pmu_enable_counter_mask(vcpu, val);
- kvm_vcpu_pmu_restore_guest(vcpu);
- } else {
+ else
/* accessing PMCNTENCLR_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
- kvm_pmu_disable_counter_mask(vcpu, val);
- }
+
+ kvm_pmu_reprogram_counter_mask(vcpu, val);
} else {
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
}
@@ -2450,6 +2448,26 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
return __el2_visibility(vcpu, rd, s1pie_visibility);
}
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+ if (!access_rw(vcpu, p, r))
+ return false;
+
+ /*
+ * Request a reload of the PMU to enable/disable the counters affected
+ * by HPME.
+ */
+ if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+ return true;
+}
+
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2983,7 +3001,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
- EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+ EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
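
The new access_mdcr() handler follows a common KVM pattern: complete the register write first, then, if a bit that affects counter scheduling flipped, defer the actual reprogramming to a request processed on the next guest entry. A stripped-down illustration with made-up types and an illustrative bit position:

#include <stdbool.h>
#include <stdint.h>

#define MDCR_HPME (1ull << 7)    /* illustrative position of MDCR_EL2.HPME */

/* Hypothetical vCPU: just the shadowed register and a pending-request flag. */
struct vcpu {
	uint64_t mdcr_el2;
	bool reload_pmu_requested;
};

/*
 * Trapped MDCR_EL2 write: store the new value, then request a PMU reload
 * only if the bit gating the hyp range of counters actually changed.
 * The expensive reprogramming happens later, on guest entry.
 */
static void write_mdcr_el2(struct vcpu *v, uint64_t val)
{
	uint64_t old = v->mdcr_el2;

	v->mdcr_el2 = val;
	if ((old ^ val) & MDCR_HPME)
		v->reload_pmu_requested = true;
}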
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 6d0d329cbb35..f9acf866c709 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -34,6 +34,8 @@ enum vcpu_ftr {
#define E500_TLB_BITMAP (1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 29)
+/* entry is writable on the host */
+#define E500_TLB_WRITABLE (1 << 28)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR (0x7f)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index e5a145b578a4..06caf8bbbe2b 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -45,11 +45,14 @@ static inline unsigned int tlb1_max_shadow_size(void)
return host_tlb_params[1].entries - tlbcam_index - 1;
}
-static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
+static inline u32 e500_shadow_mas3_attrib(u32 mas3, bool writable, int usermode)
{
/* Mask off reserved bits. */
mas3 &= MAS3_ATTRIB_MASK;
+ if (!writable)
+ mas3 &= ~(MAS3_UW|MAS3_SW);
+
#ifndef CONFIG_KVM_BOOKE_HV
if (!usermode) {
/* Guest is in supervisor mode,
@@ -242,17 +245,18 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
-static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
struct kvm_book3e_206_tlb_entry *gtlbe,
- kvm_pfn_t pfn, unsigned int wimg)
+ kvm_pfn_t pfn, unsigned int wimg,
+ bool writable)
{
ref->pfn = pfn;
ref->flags = E500_TLB_VALID;
+ if (writable)
+ ref->flags |= E500_TLB_WRITABLE;
/* Use guest supplied MAS2_G and MAS2_E */
ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
-
- return tlbe_is_writable(gtlbe);
}
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
@@ -305,6 +309,7 @@ static void kvmppc_e500_setup_stlbe(
{
kvm_pfn_t pfn = ref->pfn;
u32 pr = vcpu->arch.shared->msr & MSR_PR;
+ bool writable = !!(ref->flags & E500_TLB_WRITABLE);
BUG_ON(!(ref->flags & E500_TLB_VALID));
@@ -312,7 +317,7 @@ static void kvmppc_e500_setup_stlbe(
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
- e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+ e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr);
}
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -321,15 +326,14 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
struct tlbe_ref *ref)
{
struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
+ unsigned int psize;
+ unsigned long pfn;
struct page *page = NULL;
unsigned long hva;
- int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
int ret = 0;
unsigned long mmu_seq;
struct kvm *kvm = vcpu_e500->vcpu.kvm;
- unsigned long tsize_pages = 0;
pte_t *ptep;
unsigned int wimg = 0;
pgd_t *pgdir;
@@ -351,110 +355,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
hva = gfn_to_hva_memslot(slot, gfn);
- if (tlbsel == 1) {
- struct vm_area_struct *vma;
- mmap_read_lock(kvm->mm);
-
- vma = find_vma(kvm->mm, hva);
- if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_PFNMAP)) {
- /*
- * This VMA is a physically contiguous region (e.g.
- * /dev/mem) that bypasses normal Linux page
- * management. Find the overlap between the
- * vma and the memslot.
- */
-
- unsigned long start, end;
- unsigned long slot_start, slot_end;
-
- pfnmap = 1;
-
- start = vma->vm_pgoff;
- end = start +
- vma_pages(vma);
-
- pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
-
- slot_start = pfn - (gfn - slot->base_gfn);
- slot_end = slot_start + slot->npages;
-
- if (start < slot_start)
- start = slot_start;
- if (end > slot_end)
- end = slot_end;
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
-
- /*
- * Now find the largest tsize (up to what the guest
- * requested) that will cover gfn, stay within the
- * range, and for which gfn and pfn are mutually
- * aligned.
- */
-
- for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
- unsigned long gfn_start, gfn_end;
- tsize_pages = 1UL << (tsize - 2);
-
- gfn_start = gfn & ~(tsize_pages - 1);
- gfn_end = gfn_start + tsize_pages;
-
- if (gfn_start + pfn - gfn < start)
- continue;
- if (gfn_end + pfn - gfn > end)
- continue;
- if ((gfn & (tsize_pages - 1)) !=
- (pfn & (tsize_pages - 1)))
- continue;
-
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
- pfn &= ~(tsize_pages - 1);
- break;
- }
- } else if (vma && hva >= vma->vm_start &&
- is_vm_hugetlb_page(vma)) {
- unsigned long psize = vma_kernel_pagesize(vma);
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * Take the largest page size that satisfies both host
- * and guest mapping
- */
- tsize = min(__ilog2(psize) - 10, tsize);
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
- }
-
- mmap_read_unlock(kvm->mm);
- }
-
- if (likely(!pfnmap)) {
- tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
- pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
- if (is_error_noslot_pfn(pfn)) {
- if (printk_ratelimit())
- pr_err("%s: real page not found for gfn %lx\n",
- __func__, (long)gfn);
- return -EINVAL;
- }
-
- /* Align guest and physical address to page map boundaries */
- pfn &= ~(tsize_pages - 1);
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
+ if (is_error_noslot_pfn(pfn)) {
+ if (printk_ratelimit())
+ pr_err("%s: real page not found for gfn %lx\n",
+ __func__, (long)gfn);
+ return -EINVAL;
}
spin_lock(&kvm->mmu_lock);
@@ -472,14 +378,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* can't run hence pfn won't change.
*/
local_irq_save(flags);
- ptep = find_linux_pte(pgdir, hva, NULL, NULL);
+ ptep = find_linux_pte(pgdir, hva, NULL, &psize);
if (ptep) {
pte_t pte = READ_ONCE(*ptep);
if (pte_present(pte)) {
wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
MAS2_WIMGE_MASK;
- local_irq_restore(flags);
} else {
local_irq_restore(flags);
pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
@@ -488,10 +393,72 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
goto out;
}
}
- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+ local_irq_restore(flags);
+
+ if (psize && tlbsel == 1) {
+ unsigned long psize_pages, tsize_pages;
+ unsigned long start, end;
+ unsigned long slot_start, slot_end;
+
+ psize_pages = 1UL << (psize - PAGE_SHIFT);
+ start = pfn & ~(psize_pages - 1);
+ end = start + psize_pages;
+
+ slot_start = pfn - (gfn - slot->base_gfn);
+ slot_end = slot_start + slot->npages;
+
+ if (start < slot_start)
+ start = slot_start;
+ if (end > slot_end)
+ end = slot_end;
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * Any page size that doesn't satisfy the host mapping
+ * will fail the start and end tests.
+ */
+ tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize);
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+ /*
+ * Now find the largest tsize (up to what the guest
+ * requested) that will cover gfn, stay within the
+ * range, and for which gfn and pfn are mutually
+ * aligned.
+ */
+
+ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+ unsigned long gfn_start, gfn_end;
+ tsize_pages = 1UL << (tsize - 2);
+
+ gfn_start = gfn & ~(tsize_pages - 1);
+ gfn_end = gfn_start + tsize_pages;
+
+ if (gfn_start + pfn - gfn < start)
+ continue;
+ if (gfn_end + pfn - gfn > end)
+ continue;
+ if ((gfn & (tsize_pages - 1)) !=
+ (pfn & (tsize_pages - 1)))
+ continue;
+
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn &= ~(tsize_pages - 1);
+ break;
+ }
+ }
+ kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe);
+ writable = tlbe_is_writable(stlbe);
/* Clear i-cache for new pages */
kvmppc_mmu_flush_icache(pfn);
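
The rewritten sizing logic in e500_mmu_host.c now trusts the host PTE: the page size returned by the PTE lookup caps the shadow TLB entry, and the size is shrunk until the guest and host frame numbers line up. A condensed sketch of that selection; it omits the memslot range clamping the real code also performs.

#include <stdint.h>

/*
 * e500 TSIZE encoding: page size = 1K << tsize, and only even values are
 * implemented (2 = 4K, 4 = 16K, 6 = 64K, ...).  Pick the largest shadow
 * size that the guest asked for, that the host PTE actually backs, and
 * for which gfn and pfn are mutually aligned -- instead of guessing from
 * the backing VMA as the old code did.
 */
static int pick_shadow_tsize(int guest_tsize, unsigned int host_page_shift,
			     uint64_t gfn, uint64_t pfn)
{
	int host_tsize = (int)host_page_shift - 10;   /* shift 12 (4K) -> tsize 2 */
	int tsize = guest_tsize < host_tsize ? guest_tsize : host_tsize;

	if (tsize < 2)
		tsize = 2;        /* never below 4K */
	tsize &= ~1;              /* odd tsizes are not implemented */

	for (; tsize > 2; tsize -= 2) {
		uint64_t npages = 1ull << (tsize - 2);   /* 4K pages covered at this tsize */

		/* gfn and pfn must agree on the bits covered by the entry. */
		if ((gfn & (npages - 1)) == (pfn & (npages - 1)))
			break;
	}
	return tsize;
}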
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index ea8dce299954..d4f031e086fc 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2678,9 +2678,13 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
kvm_s390_clear_float_irqs(dev->kvm);
break;
case KVM_DEV_FLIC_APF_ENABLE:
+ if (kvm_is_ucontrol(dev->kvm))
+ return -EINVAL;
dev->kvm->arch.gmap->pfault_enabled = 1;
break;
case KVM_DEV_FLIC_APF_DISABLE_WAIT:
+ if (kvm_is_ucontrol(dev->kvm))
+ return -EINVAL;
dev->kvm->arch.gmap->pfault_enabled = 0;
/*
* Make sure no async faults are in transition when
@@ -2894,6 +2898,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
switch (ue->type) {
/* we store the userspace addresses instead of the guest addresses */
case KVM_IRQ_ROUTING_S390_ADAPTER:
+ if (kvm_is_ucontrol(kvm))
+ return -EINVAL;
e->set = set_adapter_int;
uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
if (uaddr == -EFAULT)
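
The s390 interrupt.c hunks all apply the same guard: user-controlled (ucontrol) VMs manage their own address spaces and have no VM-wide gmap, so interfaces that would dereference it are rejected with -EINVAL up front. A toy sketch of the guard, with hypothetical structures in place of the kvm-s390 ones:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-ins: on a ucontrol VM the VM-wide gmap is NULL. */
struct gmap { bool pfault_enabled; };
struct kvm  { bool is_ucontrol; struct gmap *gmap; };

static int flic_enable_pfault(struct kvm *kvm)
{
	if (kvm->is_ucontrol)
		return -EINVAL;              /* reject before dereferencing gmap */
	kvm->gmap->pfault_enabled = true;
	return 0;
}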
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 150b9387860a..a687695d8f68 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -854,7 +854,7 @@ unpin:
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
gpa_t gpa)
{
- hpa_t hpa = (hpa_t) vsie_page->scb_o;
+ hpa_t hpa = virt_to_phys(vsie_page->scb_o);
if (hpa)
unpin_guest_page(vcpu->kvm, gpa, hpa);