Diffstat (limited to 'arch/mips/kvm/mips.c')
-rw-r--r-- | arch/mips/kvm/mips.c | 367
1 file changed, 256 insertions, 111 deletions
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 44da5259f..a6ea084b4 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -9,6 +9,7 @@
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  */
 
+#include <linux/bitops.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kdebug.h>
@@ -147,7 +148,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
 	/* Put the pages we reserved for the guest pmap */
 	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
 		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
-			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
 	}
 	kfree(kvm->arch.guest_pmap);
@@ -244,10 +245,27 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	}
 }
 
+static inline void dump_handler(const char *symbol, void *start, void *end)
+{
+	u32 *p;
+
+	pr_debug("LEAF(%s)\n", symbol);
+
+	pr_debug("\t.set push\n");
+	pr_debug("\t.set noreorder\n");
+
+	for (p = start; p < (u32 *)end; ++p)
+		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
+
+	pr_debug("\t.set\tpop\n");
+
+	pr_debug("\tEND(%s)\n", symbol);
+}
+
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	int err, size, offset;
-	void *gebase;
+	int err, size;
+	void *gebase, *p, *handler;
 	int i;
 
 	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
@@ -273,9 +291,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	else
 		size = 0x4000;
 
-	/* Save Linux EBASE */
-	vcpu->arch.host_ebase = (void *)read_c0_ebase();
-
 	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
 
 	if (!gebase) {
@@ -285,44 +300,53 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		  ALIGN(size, PAGE_SIZE), gebase);
 
+	/*
+	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
+	 * limits us to the low 512MB of physical address space. If the memory
+	 * we allocate is out of range, just give up now.
+	 */
+	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
+		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+			gebase);
+		err = -ENOMEM;
+		goto out_free_gebase;
+	}
+
 	/* Save new ebase */
 	vcpu->arch.guest_ebase = gebase;
 
-	/* Copy L1 Guest Exception handler to correct offset */
+	/* Build guest exception vectors dynamically in unmapped memory */
+	handler = gebase + 0x2000;
 
 	/* TLB Refill, EXL = 0 */
-	memcpy(gebase, mips32_exception,
-	       mips32_exceptionEnd - mips32_exception);
+	kvm_mips_build_exception(gebase, handler);
 
 	/* General Exception Entry point */
-	memcpy(gebase + 0x180, mips32_exception,
-	       mips32_exceptionEnd - mips32_exception);
+	kvm_mips_build_exception(gebase + 0x180, handler);
 
 	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
 	for (i = 0; i < 8; i++) {
 		kvm_debug("L1 Vectored handler @ %p\n",
 			  gebase + 0x200 + (i * VECTORSPACING));
-		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
-		       mips32_exceptionEnd - mips32_exception);
+		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
+					 handler);
 	}
 
-	/* General handler, relocate to unmapped space for sanity's sake */
-	offset = 0x2000;
-	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
-		  gebase + offset,
-		  mips32_GuestExceptionEnd - mips32_GuestException);
+	/* General exit handler */
+	p = handler;
+	p = kvm_mips_build_exit(p);
 
-	memcpy(gebase + offset, mips32_GuestException,
-	       mips32_GuestExceptionEnd - mips32_GuestException);
+	/* Guest entry routine */
+	vcpu->arch.vcpu_run = p;
+	p = kvm_mips_build_vcpu_run(p);
 
-#ifdef MODULE
-	offset += mips32_GuestExceptionEnd - mips32_GuestException;
-	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
-	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
-	vcpu->arch.vcpu_run = gebase + offset;
-#else
-	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
-#endif
+	/* Dump the generated code */
+	pr_debug("#include <asm/asm.h>\n");
+	pr_debug("#include <asm/regdef.h>\n");
+	pr_debug("\n");
+	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
+	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
+	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
 
 	/* Invalidate the icache for these ranges */
 	local_flush_icache_range((unsigned long)gebase,
@@ -408,17 +432,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	kvm_mips_deliver_interrupts(vcpu,
 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
 
-	__kvm_guest_enter();
+	guest_enter_irqoff();
 
 	/* Disable hardware page table walking while in guest */
 	htw_stop();
 
+	trace_kvm_enter(vcpu);
 	r = vcpu->arch.vcpu_run(run, vcpu);
+	trace_kvm_out(vcpu);
 
 	/* Re-enable HTW before enabling interrupts */
 	htw_start();
 
-	__kvm_guest_exit();
+	guest_exit_irqoff();
 	local_irq_enable();
 
 	if (vcpu->sigset_active)
@@ -507,8 +533,10 @@ static u64 kvm_mips_get_one_regs[] = {
 	KVM_REG_MIPS_R30,
 	KVM_REG_MIPS_R31,
 
+#ifndef CONFIG_CPU_MIPSR6
 	KVM_REG_MIPS_HI,
 	KVM_REG_MIPS_LO,
+#endif
 	KVM_REG_MIPS_PC,
 
 	KVM_REG_MIPS_CP0_INDEX,
@@ -539,6 +567,104 @@ static u64 kvm_mips_get_one_regs[] = {
 	KVM_REG_MIPS_COUNT_HZ,
 };
 
+static u64 kvm_mips_get_one_regs_fpu[] = {
+	KVM_REG_MIPS_FCR_IR,
+	KVM_REG_MIPS_FCR_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_msa[] = {
+	KVM_REG_MIPS_MSA_IR,
+	KVM_REG_MIPS_MSA_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_kscratch[] = {
+	KVM_REG_MIPS_CP0_KSCRATCH1,
+	KVM_REG_MIPS_CP0_KSCRATCH2,
+	KVM_REG_MIPS_CP0_KSCRATCH3,
+	KVM_REG_MIPS_CP0_KSCRATCH4,
+	KVM_REG_MIPS_CP0_KSCRATCH5,
+	KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
+static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
+{
+	unsigned long ret;
+
+	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
+	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
+		/* odd doubles */
+		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
+			ret += 16;
+	}
+	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
+	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
+	ret += kvm_mips_callbacks->num_regs(vcpu);
+
+	return ret;
+}
+
+static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+	u64 index;
+	unsigned int i;
+
+	if (copy_to_user(indices, kvm_mips_get_one_regs,
+			 sizeof(kvm_mips_get_one_regs)))
+		return -EFAULT;
+	indices += ARRAY_SIZE(kvm_mips_get_one_regs);
+
+	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
+				 sizeof(kvm_mips_get_one_regs_fpu)))
+			return -EFAULT;
+		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
+
+		for (i = 0; i < 32; ++i) {
+			index = KVM_REG_MIPS_FPR_32(i);
+			if (copy_to_user(indices, &index, sizeof(index)))
+				return -EFAULT;
+			++indices;
+
+			/* skip odd doubles if no F64 */
+			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+				continue;
+
+			index = KVM_REG_MIPS_FPR_64(i);
+			if (copy_to_user(indices, &index, sizeof(index)))
+				return -EFAULT;
+			++indices;
+		}
+	}
+
+	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
+		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
+				 sizeof(kvm_mips_get_one_regs_msa)))
+			return -EFAULT;
+		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
+
+		for (i = 0; i < 32; ++i) {
+			index = KVM_REG_MIPS_VEC_128(i);
+			if (copy_to_user(indices, &index, sizeof(index)))
+				return -EFAULT;
+			++indices;
+		}
+	}
+
+	for (i = 0; i < 6; ++i) {
+		if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
+			continue;
+
+		if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
+				 sizeof(kvm_mips_get_one_regs_kscratch[i])))
+			return -EFAULT;
+		++indices;
+	}
+
+	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
+}
+
 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
@@ -554,12 +680,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
 		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
 		break;
+#ifndef CONFIG_CPU_MIPSR6
 	case KVM_REG_MIPS_HI:
 		v = (long)vcpu->arch.hi;
 		break;
 	case KVM_REG_MIPS_LO:
 		v = (long)vcpu->arch.lo;
 		break;
+#endif
 	case KVM_REG_MIPS_PC:
 		v = (long)vcpu->arch.pc;
 		break;
@@ -688,17 +816,37 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_ERROREPC:
 		v = (long)kvm_read_c0_guest_errorepc(cop0);
 		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+			return -EINVAL;
+		switch (idx) {
+		case 2:
+			v = (long)kvm_read_c0_guest_kscratch1(cop0);
+			break;
+		case 3:
+			v = (long)kvm_read_c0_guest_kscratch2(cop0);
+			break;
+		case 4:
+			v = (long)kvm_read_c0_guest_kscratch3(cop0);
+			break;
+		case 5:
+			v = (long)kvm_read_c0_guest_kscratch4(cop0);
+			break;
+		case 6:
+			v = (long)kvm_read_c0_guest_kscratch5(cop0);
+			break;
+		case 7:
+			v = (long)kvm_read_c0_guest_kscratch6(cop0);
+			break;
+		}
+		break;
 	/* registers to be handled specially */
-	case KVM_REG_MIPS_CP0_COUNT:
-	case KVM_REG_MIPS_COUNT_CTL:
-	case KVM_REG_MIPS_COUNT_RESUME:
-	case KVM_REG_MIPS_COUNT_HZ:
+	default:
 		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
 		if (ret)
 			return ret;
 		break;
-	default:
-		return -EINVAL;
 	}
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -755,12 +903,14 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
 		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
 		break;
+#ifndef CONFIG_CPU_MIPSR6
 	case KVM_REG_MIPS_HI:
 		vcpu->arch.hi = v;
 		break;
 	case KVM_REG_MIPS_LO:
 		vcpu->arch.lo = v;
 		break;
+#endif
 	case KVM_REG_MIPS_PC:
 		vcpu->arch.pc = v;
 		break;
@@ -859,22 +1009,34 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_MIPS_CP0_ERROREPC:
 		kvm_write_c0_guest_errorepc(cop0, v);
 		break;
+	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+			return -EINVAL;
+		switch (idx) {
+		case 2:
+			kvm_write_c0_guest_kscratch1(cop0, v);
+			break;
+		case 3:
+			kvm_write_c0_guest_kscratch2(cop0, v);
+			break;
+		case 4:
+			kvm_write_c0_guest_kscratch3(cop0, v);
+			break;
+		case 5:
+			kvm_write_c0_guest_kscratch4(cop0, v);
+			break;
+		case 6:
+			kvm_write_c0_guest_kscratch5(cop0, v);
+			break;
+		case 7:
+			kvm_write_c0_guest_kscratch6(cop0, v);
+			break;
+		}
+		break;
 	/* registers to be handled specially */
-	case KVM_REG_MIPS_CP0_COUNT:
-	case KVM_REG_MIPS_CP0_COMPARE:
-	case KVM_REG_MIPS_CP0_CAUSE:
-	case KVM_REG_MIPS_CP0_CONFIG:
-	case KVM_REG_MIPS_CP0_CONFIG1:
-	case KVM_REG_MIPS_CP0_CONFIG2:
-	case KVM_REG_MIPS_CP0_CONFIG3:
-	case KVM_REG_MIPS_CP0_CONFIG4:
-	case KVM_REG_MIPS_CP0_CONFIG5:
-	case KVM_REG_MIPS_COUNT_CTL:
-	case KVM_REG_MIPS_COUNT_RESUME:
-	case KVM_REG_MIPS_COUNT_HZ:
-		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
 	default:
-		return -EINVAL;
+		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
 	}
 	return 0;
 }
@@ -927,23 +1089,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 	}
 	case KVM_GET_REG_LIST: {
 		struct kvm_reg_list __user *user_list = argp;
-		u64 __user *reg_dest;
 		struct kvm_reg_list reg_list;
 		unsigned n;
 
 		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
 			return -EFAULT;
 		n = reg_list.n;
-		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+		reg_list.n = kvm_mips_num_regs(vcpu);
 		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
 			return -EFAULT;
 		if (n < reg_list.n)
 			return -E2BIG;
-		reg_dest = user_list->reg;
-		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
-				 sizeof(kvm_mips_get_one_regs)))
-			return -EFAULT;
-		return 0;
+		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
 	}
 	case KVM_NMI:
 		/* Treat the NMI as a CPU reset */
@@ -1222,7 +1379,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 static void kvm_mips_set_c0_status(void)
 {
-	uint32_t status = read_c0_status();
+	u32 status = read_c0_status();
 
 	if (cpu_has_dsp)
 		status |= (ST0_MX);
@@ -1236,9 +1393,9 @@ static void kvm_mips_set_c0_status(void)
  */
 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	uint32_t cause = vcpu->arch.host_cp0_cause;
-	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	u32 cause = vcpu->arch.host_cp0_cause;
+	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
@@ -1260,6 +1417,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
 			cause, opc, run, vcpu);
+	trace_kvm_exit(vcpu, exccode);
 
 	/*
 	 * Do a privilege check, if in UM most of these exit conditions end up
@@ -1279,7 +1437,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
 
 		++vcpu->stat.int_exits;
-		trace_kvm_exit(vcpu, INT_EXITS);
 
 		if (need_resched())
 			cond_resched();
@@ -1291,7 +1448,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
 
 		++vcpu->stat.cop_unusable_exits;
-		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
 		/* XXXKYMA: Might need to return to user space */
 		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
@@ -1300,7 +1456,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	case EXCCODE_MOD:
 		++vcpu->stat.tlbmod_exits;
-		trace_kvm_exit(vcpu, TLBMOD_EXITS);
 		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
 		break;
 
@@ -1310,7 +1465,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
-		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
 		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
 		break;
 
@@ -1319,61 +1473,51 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			  cause, opc, badvaddr);
 
 		++vcpu->stat.tlbmiss_ld_exits;
-		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
 		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
 		break;
 
 	case EXCCODE_ADES:
 		++vcpu->stat.addrerr_st_exits;
-		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
 		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
 		break;
 
 	case EXCCODE_ADEL:
 		++vcpu->stat.addrerr_ld_exits;
-		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
 		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
 		break;
 
 	case EXCCODE_SYS:
 		++vcpu->stat.syscall_exits;
-		trace_kvm_exit(vcpu, SYSCALL_EXITS);
 		ret = kvm_mips_callbacks->handle_syscall(vcpu);
 		break;
 
 	case EXCCODE_RI:
 		++vcpu->stat.resvd_inst_exits;
-		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
 		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
 		break;
 
 	case EXCCODE_BP:
 		++vcpu->stat.break_inst_exits;
-		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
 		ret = kvm_mips_callbacks->handle_break(vcpu);
 		break;
 
 	case EXCCODE_TR:
 		++vcpu->stat.trap_inst_exits;
-		trace_kvm_exit(vcpu, TRAP_INST_EXITS);
 		ret = kvm_mips_callbacks->handle_trap(vcpu);
 		break;
 
 	case EXCCODE_MSAFPE:
 		++vcpu->stat.msa_fpe_exits;
-		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
 		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
 		break;
 
 	case EXCCODE_FPE:
 		++vcpu->stat.fpe_exits;
-		trace_kvm_exit(vcpu, FPE_EXITS);
 		ret = kvm_mips_callbacks->handle_fpe(vcpu);
 		break;
 
 	case EXCCODE_MSADIS:
 		++vcpu->stat.msa_disabled_exits;
-		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
 		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
 		break;
 
@@ -1400,11 +1544,13 @@ skip_emul:
 			run->exit_reason = KVM_EXIT_INTR;
 			ret = (-EINTR << 2) | RESUME_HOST;
 			++vcpu->stat.signal_exits;
-			trace_kvm_exit(vcpu, SIGNAL_EXITS);
+			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
 		}
 	}
 
 	if (ret == RESUME_GUEST) {
+		trace_kvm_reenter(vcpu);
+
 		/*
 		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
 		 * is live), restore FCR31 / MSACSR.
@@ -1450,7 +1596,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 	 * not to clobber the status register directly via the commpage.
 	 */
 	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
-	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
 		kvm_lose_fpu(vcpu);
 
 	/*
@@ -1465,9 +1611,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 	enable_fpu_hazard();
 
 	/* If guest FPU state not active, restore it now */
-	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
 		__kvm_restore_fpu(&vcpu->arch);
-		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+	} else {
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
 	}
 
 	preempt_enable();
@@ -1494,8 +1643,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
 	 * interacts with MSA state, so play it safe and save it first.
 	 */
 	if (!(sr & ST0_FR) &&
-	    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
-				     KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+	    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
+				     KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
 		kvm_lose_fpu(vcpu);
 
 	change_c0_status(ST0_CU1 | ST0_FR, sr);
@@ -1509,22 +1658,26 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
 		set_c0_config5(MIPS_CONF5_MSAEN);
 	enable_fpu_hazard();
 
-	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
-	case KVM_MIPS_FPU_FPU:
+	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
+	case KVM_MIPS_AUX_FPU:
 		/*
 		 * Guest FPU state already loaded, only restore upper MSA state
 		 */
 		__kvm_restore_msa_upper(&vcpu->arch);
-		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
 		break;
 	case 0:
 		/* Neither FPU or MSA already active, restore full MSA state */
 		__kvm_restore_msa(&vcpu->arch);
-		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
 		if (kvm_mips_guest_has_fpu(&vcpu->arch))
-			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
+			      KVM_TRACE_AUX_FPU_MSA);
 		break;
 	default:
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
 		break;
 	}
 
@@ -1536,13 +1689,15 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
 void kvm_drop_fpu(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
-	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
 		disable_msa();
-		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
+		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
 	}
-	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
 		clear_c0_status(ST0_CU1 | ST0_FR);
-		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
+		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
 	}
 	preempt_enable();
 }
@@ -1558,25 +1713,27 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 	 */
 	preempt_disable();
 
-	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
 		set_c0_config5(MIPS_CONF5_MSAEN);
 		enable_fpu_hazard();
 
 		__kvm_save_msa(&vcpu->arch);
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
 
 		/* Disable MSA & FPU */
 		disable_msa();
-		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
 			clear_c0_status(ST0_CU1 | ST0_FR);
 			disable_fpu_hazard();
 		}
-		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
-	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
+	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
 		set_c0_status(ST0_CU1);
 		enable_fpu_hazard();
 
 		__kvm_save_fpu(&vcpu->arch);
-		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
 
 		/* Disable FPU */
 		clear_c0_status(ST0_CU1 | ST0_FR);
@@ -1638,6 +1795,10 @@ static int __init kvm_mips_init(void)
 {
 	int ret;
 
+	ret = kvm_mips_entry_setup();
+	if (ret)
+		return ret;
+
 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 
 	if (ret)
@@ -1645,18 +1806,6 @@ static int __init kvm_mips_init(void)
 
 	register_die_notifier(&kvm_mips_csr_die_notifier);
 
-	/*
-	 * On MIPS, kernel modules are executed from "mapped space", which
-	 * requires TLBs. The TLB handling code is statically linked with
-	 * the rest of the kernel (tlb.c) to avoid the possibility of
-	 * double faulting. The issue is that the TLB code references
-	 * routines that are part of the the KVM module, which are only
-	 * available once the module is loaded.
-	 */
-	kvm_mips_gfn_to_pfn = gfn_to_pfn;
-	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
-	kvm_mips_is_error_pfn = is_error_pfn;
-
 	return 0;
 }
 
@@ -1664,10 +1813,6 @@ static void __exit kvm_mips_exit(void)
 {
 	kvm_exit();
 
-	kvm_mips_gfn_to_pfn = NULL;
-	kvm_mips_release_pfn_clean = NULL;
-	kvm_mips_is_error_pfn = NULL;
-
 	unregister_die_notifier(&kvm_mips_csr_die_notifier);
 }
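
Note: the reworked KVM_GET_REG_LIST handling in this diff sizes the register list per vCPU (kvm_mips_num_regs()) and then copies the indices out (kvm_mips_copy_reg_indices()), returning -E2BIG when the caller's buffer is smaller than the reported count. Below is a minimal userspace sketch of the usual two-call pattern that consumes this ioctl; it is illustrative only and not part of this commit, and vcpu_fd plus the error handling are assumptions.

/*
 * Illustrative only (not part of this commit): probe the register count,
 * then fetch the full index list from KVM_GET_REG_LIST. vcpu_fd is assumed
 * to be an existing KVM vCPU file descriptor.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call fails with E2BIG but reports the required count in .n */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;
	list->n = probe.n;

	/* Second call copies out one 64-bit index per supported register */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;
}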