Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c            1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  11
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c         6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c  12
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c         38
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c       29
-rw-r--r--  arch/powerpc/kvm/book3s_xics.h       1
-rw-r--r--  arch/powerpc/kvm/booke.c             1
-rw-r--r--  arch/powerpc/kvm/powerpc.c           22
9 files changed, 85 insertions, 36 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b34220d2a..47018fcbf 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -54,6 +54,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "queue_intr",  VCPU_STAT(queue_intr) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "pf_storage",  VCPU_STAT(pf_storage) },
 	{ "sp_storage",  VCPU_STAT(sp_storage) },
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c7b78d833..05f09ae82 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -447,7 +447,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	struct revmap_entry *rev;
 	struct page *page, *pages[1];
 	long index, ret, npages;
-	unsigned long is_io;
+	bool is_ci;
 	unsigned int writing, write_ok;
 	struct vm_area_struct *vma;
 	unsigned long rcbits;
@@ -503,7 +503,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	smp_rmb();
 
 	ret = -EFAULT;
-	is_io = 0;
+	is_ci = false;
 	pfn = 0;
 	page = NULL;
 	pte_size = PAGE_SIZE;
@@ -521,7 +521,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			pfn = vma->vm_pgoff +
 				((hva - vma->vm_start) >> PAGE_SHIFT);
 			pte_size = psize;
-			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
 			write_ok = vma->vm_flags & VM_WRITE;
 		}
 		up_read(&current->mm->mmap_sem);
@@ -558,10 +558,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			goto out_put;
 
 	/* Check WIMG vs. the actual page we're accessing */
-	if (!hpte_cache_flags_ok(r, is_io)) {
-		if (is_io)
+	if (!hpte_cache_flags_ok(r, is_ci)) {
+		if (is_ci)
 			goto out_put;
-
 		/*
 		 * Allow guest to map emulated device memory as
 		 * uncacheable, but actually make it cacheable.
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 93243554c..e20beae5c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3272,6 +3272,12 @@ static int kvmppc_core_check_processor_compat_hv(void)
 	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
 	    !cpu_has_feature(CPU_FTR_ARCH_206))
 		return -EIO;
+	/*
+	 * Disable KVM for Power9 until the required bits are merged.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		return -EIO;
+
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 4cb8db05f..99b4e9d5d 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -175,7 +175,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
 	unsigned hpage_shift;
-	unsigned long is_io;
+	bool is_ci;
 	unsigned long *rmap;
 	pte_t *ptep;
 	unsigned int writing;
@@ -199,7 +199,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	gfn = gpa >> PAGE_SHIFT;
 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	pa = 0;
-	is_io = ~0ul;
+	is_ci = false;
 	rmap = NULL;
 	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
 		/* Emulated MMIO - mark this with key=31 */
@@ -250,7 +250,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
 				ptel = hpte_make_readonly(ptel);
-			is_io = hpte_cache_bits(pte_val(pte));
+			is_ci = pte_ci(pte);
 			pa = pte_pfn(pte) << PAGE_SHIFT;
 			pa |= hva & (host_pte_size - 1);
 			pa |= gpa & ~PAGE_MASK;
@@ -267,9 +267,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	else
 		pteh |= HPTE_V_ABSENT;
 
-	/* Check WIMG */
-	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
-		if (is_io)
+	/* If we had a host pte mapping, check WIMG */
+	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
+		if (is_ci)
 			return H_PARAMETER;
 		/*
 		 * Allow guest to map emulated device memory as
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 95bceca8f..8e4f64f0b 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -882,6 +882,24 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 }
 #endif
 
+static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		u64 msr = kvmppc_get_msr(vcpu);
+
+		kvmppc_set_msr(vcpu, msr | MSR_SE);
+	}
+}
+
+static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		u64 msr = kvmppc_get_msr(vcpu);
+
+		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
+	}
+}
+
 int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			  unsigned int exit_nr)
 {
@@ -1207,10 +1225,18 @@ program_interrupt:
 		break;
 #endif
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
-	case BOOK3S_INTERRUPT_TRACE:
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_TRACE:
+		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+			run->exit_reason = KVM_EXIT_DEBUG;
+			r = RESUME_HOST;
+		} else {
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+			r = RESUME_GUEST;
+		}
+		break;
 	default:
 	{
 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
@@ -1479,6 +1505,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
+	kvmppc_setup_debug(vcpu);
+
 	/*
 	 * Interrupts could be timers for the guest which we have to inject
 	 * again, so let's postpone them until we're in the guest and if we
@@ -1501,6 +1529,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
+	kvmppc_clear_debug(vcpu);
+
 	/* No need for kvm_guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 
@@ -1683,7 +1713,11 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
 
 static int kvmppc_core_check_processor_compat_pr(void)
 {
-	/* we are always compatible */
+	/*
+	 * Disable KVM for Power9 until the required bits are merged.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		return -EIO;
 	return 0;
 }
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 46871d554..a75ba38a2 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -92,7 +92,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
 	 * we are the only setter, thus concurrent access is undefined
 	 * to begin with.
 	 */
-	if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
+	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
 		state->asserted = 1;
 	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
 		state->asserted = 0;
@@ -280,7 +280,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
 	if (!success)
 		goto bail;
 
-	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
 		 icp->server_num,
 		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
 		 old.need_resend, old.out_ee);
@@ -336,7 +336,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
 	union kvmppc_icp_state old_state, new_state;
 	bool success;
 
-	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
+	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
 		 icp->server_num);
 
 	do {
@@ -1174,9 +1174,11 @@ static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
 		prio = irqp->saved_priority;
 	}
 	val |= prio << KVM_XICS_PRIORITY_SHIFT;
-	if (irqp->asserted)
-		val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
-	else if (irqp->masked_pending || irqp->resend)
+	if (irqp->lsi) {
+		val |= KVM_XICS_LEVEL_SENSITIVE;
+		if (irqp->asserted)
+			val |= KVM_XICS_PENDING;
+	} else if (irqp->masked_pending || irqp->resend)
 		val |= KVM_XICS_PENDING;
 	ret = 0;
 	}
@@ -1228,9 +1230,13 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
 	irqp->priority = prio;
 	irqp->resend = 0;
 	irqp->masked_pending = 0;
+	irqp->lsi = 0;
 	irqp->asserted = 0;
-	if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
-		irqp->asserted = 1;
+	if (val & KVM_XICS_LEVEL_SENSITIVE) {
+		irqp->lsi = 1;
+		if (val & KVM_XICS_PENDING)
+			irqp->asserted = 1;
+	}
 	irqp->exists = 1;
 	arch_spin_unlock(&ics->lock);
 	local_irq_restore(flags);
@@ -1249,11 +1255,10 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 	return ics_deliver_irq(xics, irq, level);
 }
 
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-		int irq_source_id, int level, bool line_status)
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+			      struct kvm *kvm, int irq_source_id,
+			      int level, bool line_status)
 {
-	if (!level)
-		return -1;
 	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi, level,
 			   line_status);
 }
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 56ea44f98..a46b95405 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -39,6 +39,7 @@ struct ics_irq_state {
 	u8  saved_priority;
 	u8  resend;
 	u8  masked_pending;
+	u8  lsi;		/* level-sensitive interrupt */
 	u8  asserted; /* Only for LSI */
 	u8  exists;
 };
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4d66f44a1..4afae6958 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -64,6 +64,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "doorbell", VCPU_STAT(dbell_exits) },
 	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6a6873077..02416fea7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -800,9 +800,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 	}
 }
 
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       unsigned int rt, unsigned int bytes,
-		       int is_default_endian)
+static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				unsigned int rt, unsigned int bytes,
+				int is_default_endian, int sign_extend)
 {
 	int idx, ret;
 	bool host_swabbed;
@@ -827,7 +827,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	vcpu->arch.mmio_host_swabbed = host_swabbed;
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_is_write = 0;
-	vcpu->arch.mmio_sign_extend = 0;
+	vcpu->arch.mmio_sign_extend = sign_extend;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
@@ -844,6 +844,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	return EMULATE_DO_MMIO;
 }
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		       unsigned int rt, unsigned int bytes,
+		       int is_default_endian)
+{
+	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+}
 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
@@ -851,12 +858,7 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian)
 {
-	int r;
-
-	vcpu->arch.mmio_sign_extend = 1;
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
-
-	return r;
+	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,