Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--	arch/mips/kvm/emulate.c   | 44
-rw-r--r--	arch/mips/kvm/locore.S    | 94
-rw-r--r--	arch/mips/kvm/mips.c      |  9
-rw-r--r--	arch/mips/kvm/tlb.c       | 59
-rw-r--r--	arch/mips/kvm/trap_emul.c |  3
5 files changed, 96 insertions, 113 deletions
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b8b7860ec..645c8a198 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1079,15 +1079,15 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 				kvm_read_c0_guest_ebase(cop0));
 		} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 			uint32_t nasid =
-				vcpu->arch.gprs[rt] & ASID_MASK;
+				vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
 			if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
 			    ((kvm_read_c0_guest_entryhi(cop0) &
-			      ASID_MASK) != nasid)) {
+			      KVM_ENTRYHI_ASID) != nasid)) {
 				kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
 					  kvm_read_c0_guest_entryhi(cop0)
-					  & ASID_MASK,
+					  & KVM_ENTRYHI_ASID,
 					  vcpu->arch.gprs[rt]
-					  & ASID_MASK);
+					  & KVM_ENTRYHI_ASID);
 
 				/* Blow away the shadow host TLBs */
 				kvm_mips_flush_host_tlb(1);
@@ -1631,11 +1631,12 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
 						  (kvm_read_c0_guest_entryhi
-						   (cop0) & ASID_MASK));
+						   (cop0) & KVM_ENTRYHI_ASID));
 
 		if (index < 0) {
 			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
 			vcpu->arch.host_cp0_badvaddr = va;
+			vcpu->arch.pc = curr_pc;
 			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
 							 vcpu);
 			preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 			 * invalid exception to the guest
 			 */
 			if (!TLB_IS_VALID(*tlb, va)) {
+				vcpu->arch.host_cp0_badvaddr = va;
+				vcpu->arch.pc = curr_pc;
 				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
 								run, vcpu);
 				preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
-		goto dont_update_pc;
+		goto done;
 
 	}
 
@@ -1694,16 +1697,20 @@ skip_fault:
 		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
-		preempt_enable();
-		goto dont_update_pc;
 	}
 
 	preempt_enable();
+done:
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
+		vcpu->arch.pc = curr_pc;
 
 dont_update_pc:
-	/* Rollback PC */
-	vcpu->arch.pc = curr_pc;
-done:
+	/*
+	 * This is for exceptions whose emulation updates the PC, so do not
+	 * overwrite the PC under any circumstances
+	 */
+
 	return er;
 }
 
@@ -1797,7 +1804,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1844,7 +1851,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi =
 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1889,7 +1896,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1933,7 +1940,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1978,7 +1985,7 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
 #ifdef DEBUG
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 	int index;
 
 	/* If address not in the guest TLB, then we are in trouble */
@@ -2005,7 +2012,7 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
@@ -2580,7 +2587,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu,
 			      (va & VPN2_MASK) |
-			      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
+			      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+			       KVM_ENTRYHI_ASID));
 		if (index < 0) {
 			if (exccode == EXCCODE_TLBL) {
 				er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
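Two themes run through the emulate.c hunks above: guest EntryHi ASID fields are now masked with KVM_ENTRYHI_ASID rather than the host's ASID_MASK, and kvm_mips_emulate_cache() is restructured so the guest PC is rolled back only when emulation actually fails, while exception paths restore it explicitly before delivering a guest exception. The PC-handling pattern is easiest to see in isolation; here is a minimal standalone C sketch (the names emulate_op, fail and except are hypothetical, not kernel code):

#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL, EMULATE_EXCEPT };

/*
 * Hypothetical model of the cache-op emulation epilogue: the PC is saved
 * up front, advanced optimistically, and restored only when the operation
 * fails outright.  Exception paths set the PC back themselves (so the
 * faulting instruction is reported correctly to the guest) and must not
 * be overwritten afterwards.
 */
static enum emulation_result emulate_op(unsigned long *pc, int fail, int except)
{
	unsigned long curr_pc = *pc;
	enum emulation_result er = EMULATE_DONE;

	*pc += 4;			/* optimistic PC update */

	if (except) {
		*pc = curr_pc;		/* exception emulation sets the PC itself */
		return EMULATE_EXCEPT;	/* ...then delivers a guest exception */
	}
	if (fail)
		er = EMULATE_FAIL;

	/* done: roll back the PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		*pc = curr_pc;
	return er;
}

int main(void)
{
	unsigned long pc = 0x1000;

	emulate_op(&pc, 0, 0);
	printf("success: pc=%#lx\n", pc);	/* advanced to 0x1004 */
	emulate_op(&pc, 1, 0);
	printf("failure: pc=%#lx\n", pc);	/* rolled back, unchanged */
	return 0;
}

The split between the done and dont_update_pc labels exists precisely so the exception paths can skip the rollback test entirely.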
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index fc93a08b6..828fcfc1c 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -32,7 +32,6 @@
     EXPORT(x);
 
 /* Overload, Danger Will Robinson!! */
-#define PT_HOST_ASID		PT_BVADDR
 #define PT_HOST_USERLOCAL	PT_EPC
 
 #define CP0_DDATA_LO		$28,3
@@ -49,45 +48,18 @@
  * a1: vcpu
  */
 	.set	noreorder
-	.set	noat
 
 FEXPORT(__kvm_mips_vcpu_run)
 	/* k0/k1 not being used in host kernel context */
 	INT_ADDIU k1, sp, -PT_SIZE
-	LONG_S	$0, PT_R0(k1)
-	LONG_S	$1, PT_R1(k1)
-	LONG_S	$2, PT_R2(k1)
-	LONG_S	$3, PT_R3(k1)
-
-	LONG_S	$4, PT_R4(k1)
-	LONG_S	$5, PT_R5(k1)
-	LONG_S	$6, PT_R6(k1)
-	LONG_S	$7, PT_R7(k1)
-
-	LONG_S	$8, PT_R8(k1)
-	LONG_S	$9, PT_R9(k1)
-	LONG_S	$10, PT_R10(k1)
-	LONG_S	$11, PT_R11(k1)
-	LONG_S	$12, PT_R12(k1)
-	LONG_S	$13, PT_R13(k1)
-	LONG_S	$14, PT_R14(k1)
-	LONG_S	$15, PT_R15(k1)
 	LONG_S	$16, PT_R16(k1)
 	LONG_S	$17, PT_R17(k1)
-
 	LONG_S	$18, PT_R18(k1)
 	LONG_S	$19, PT_R19(k1)
 	LONG_S	$20, PT_R20(k1)
 	LONG_S	$21, PT_R21(k1)
 	LONG_S	$22, PT_R22(k1)
 	LONG_S	$23, PT_R23(k1)
-	LONG_S	$24, PT_R24(k1)
-	LONG_S	$25, PT_R25(k1)
-
-	/*
-	 * XXXKYMA k0/k1 not saved, not being used if we got here through
-	 * an ioctl()
-	 */
 
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
@@ -104,11 +76,6 @@ FEXPORT(__kvm_mips_vcpu_run)
 	mfc0	v0, CP0_STATUS
 	LONG_S	v0, PT_STATUS(k1)
 
-	/* Save host ASID, shove it into the BVADDR location */
-	mfc0	v1, CP0_ENTRYHI
-	andi	v1, 0xff
-	LONG_S	v1, PT_HOST_ASID(k1)
-
 	/* Save DDATA_LO, will be used to store pointer to vcpu */
 	mfc0	v1, CP0_DDATA_LO
 	LONG_S	v1, PT_HOST_USERLOCAL(k1)
@@ -170,13 +137,21 @@ FEXPORT(__kvm_mips_load_asid)
 	INT_SLL	t2, t2, 2		/* x4 */
 	REG_ADDU t3, t1, t2
 	LONG_L	k0, (t3)
-	andi	k0, k0, 0xff
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	li	t3, CPUINFO_SIZE/4
+	mul	t2, t2, t3		/* x sizeof(struct cpuinfo_mips)/4 */
+	LONG_L	t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
+	and	k0, k0, t2
+#else
+	andi	k0, k0, MIPS_ENTRYHI_ASID
+#endif
 	mtc0	k0, CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
+	.set	noat
 	/* Now load up the Guest Context from VCPU */
 	LONG_L	$1, VCPU_R1(k1)
 	LONG_L	$2, VCPU_R2(k1)
@@ -289,6 +264,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)
 
+	.set	at
+
 	/* We need to save hi/lo and restore them on the way out */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
@@ -340,9 +317,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* load up the host EBASE */
 	mfc0	v0, CP0_STATUS
 
-	.set	at
 	or	k0, v0, ST0_BEV
-	.set	noat
 
 	mtc0	k0, CP0_STATUS
 	ehb
@@ -354,7 +329,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
 	 * trigger FPE for pending exceptions.
 	 */
-	.set	at
 	and	v1, v0, ST0_CU1
 	beqz	v1, 1f
 	 nop
@@ -364,7 +338,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	sw	t0, VCPU_FCR31(k1)
 	ctc1	zero,fcr31
 	.set	pop
-	.set	noat
 1:
 
 #ifdef CONFIG_CPU_HAS_MSA
@@ -387,10 +360,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 #endif
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
-	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
 	or	v0, v0, ST0_CU0
-	.set	noat
 	mtc0	v0, CP0_STATUS
 	ehb
 
@@ -457,18 +428,14 @@ __kvm_mips_return_to_guest:
 
 	/* Switch EBASE back to the one used by KVM */
 	mfc0	v1, CP0_STATUS
-	.set	at
 	or	k0, v1, ST0_BEV
-	.set	noat
 	mtc0	k0, CP0_STATUS
 	ehb
 	mtc0	t0, CP0_EBASE
 
 	/* Setup status register for running guest in UM */
-	.set	at
 	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
 	and	v1, v1, ~(ST0_CU0 | ST0_MX)
-	.set	noat
 	mtc0	v1, CP0_STATUS
 	ehb
 
@@ -490,13 +457,21 @@ __kvm_mips_return_to_guest:
 	INT_SLL	t2, t2, 2		/* x4 */
 	REG_ADDU t3, t1, t2
 	LONG_L	k0, (t3)
-	andi	k0, k0, 0xff
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	li	t3, CPUINFO_SIZE/4
+	mul	t2, t2, t3		/* x sizeof(struct cpuinfo_mips)/4 */
+	LONG_L	t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
+	and	k0, k0, t2
+#else
+	andi	k0, k0, MIPS_ENTRYHI_ASID
+#endif
 	mtc0	k0, CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
+	.set	noat
 	/* load the guest context from VCPU and return */
 	LONG_L	$0, VCPU_R0(k1)
 	LONG_L	$1, VCPU_R1(k1)
@@ -542,6 +517,7 @@ FEXPORT(__kvm_mips_skip_guest_restore)
 	LONG_L	k1, VCPU_R27(k1)
 
 	eret
+	.set	at
 
 __kvm_mips_return_to_host:
 	/* EBASE is already pointing to Linux */
@@ -552,16 +528,6 @@ __kvm_mips_return_to_host:
 	LONG_L	k0, PT_HOST_USERLOCAL(k1)
 	mtc0	k0, CP0_DDATA_LO
 
-	/* Restore host ASID */
-	LONG_L	k0, PT_HOST_ASID(sp)
-	andi	k0, 0xff
-	mtc0	k0,CP0_ENTRYHI
-	ehb
-
-	/* Load context saved on the host stack */
-	LONG_L	$0, PT_R0(k1)
-	LONG_L	$1, PT_R1(k1)
-
 	/*
 	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
 	 * to recover the err code
@@ -569,19 +535,7 @@ __kvm_mips_return_to_host:
 	INT_SRA	k0, v0, 2
 	move	$2, k0
 
-	LONG_L	$3, PT_R3(k1)
-	LONG_L	$4, PT_R4(k1)
-	LONG_L	$5, PT_R5(k1)
-	LONG_L	$6, PT_R6(k1)
-	LONG_L	$7, PT_R7(k1)
-	LONG_L	$8, PT_R8(k1)
-	LONG_L	$9, PT_R9(k1)
-	LONG_L	$10, PT_R10(k1)
-	LONG_L	$11, PT_R11(k1)
-	LONG_L	$12, PT_R12(k1)
-	LONG_L	$13, PT_R13(k1)
-	LONG_L	$14, PT_R14(k1)
-	LONG_L	$15, PT_R15(k1)
+	/* Load context saved on the host stack */
 	LONG_L	$16, PT_R16(k1)
 	LONG_L	$17, PT_R17(k1)
 	LONG_L	$18, PT_R18(k1)
@@ -590,10 +544,6 @@ __kvm_mips_return_to_host:
 	LONG_L	$21, PT_R21(k1)
 	LONG_L	$22, PT_R22(k1)
 	LONG_L	$23, PT_R23(k1)
-	LONG_L	$24, PT_R24(k1)
-	LONG_L	$25, PT_R25(k1)
-
-	/* Host k0/k1 were not saved */
 
 	LONG_L	$28, PT_R28(k1)
 	LONG_L	$29, PT_R29(k1)
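The locore.S hunks above replace the fixed andi k0, k0, 0xff mask with either a MIPS_ENTRYHI_ASID immediate or, under CONFIG_MIPS_ASID_BITS_VARIABLE, a per-CPU mask loaded from cpu_data[cpu].asid_mask; they also drop the host ASID save/restore and the saves of caller-saved registers, and narrow the .set noat region to the code that actually clobbers $at. The per-CPU address arithmetic is terse in assembly; an equivalent in C follows (a sketch: the struct layout is hypothetical, only the asid_mask field stands in for the real struct cpuinfo_mips):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct cpuinfo_mips; only the field the
 * assembly reads is modelled, the rest is padding. */
struct cpuinfo_mips {
	unsigned long asid_mask;	/* e.g. 0xff or 0x3ff */
	char pad[120];			/* remainder of the real struct */
};

static struct cpuinfo_mips cpu_data[4] = {
	{ .asid_mask = 0xff },  { .asid_mask = 0xff },
	{ .asid_mask = 0x3ff }, { .asid_mask = 0x3ff },
};

/* Mirrors the assembly: t2 starts as cpu * 4, is scaled by
 * CPUINFO_SIZE/4 to become cpu * sizeof(struct cpuinfo_mips), and is
 * then used as an index off the cpu_data base plus the field offset. */
static unsigned long load_asid_mask(int cpu)
{
	size_t t2 = (size_t)cpu * 4;		/* INT_SLL t2, t2, 2 */
	t2 *= sizeof(struct cpuinfo_mips) / 4;	/* mul t2, t2, CPUINFO_SIZE/4 */
	return *(unsigned long *)((char *)cpu_data +
				  offsetof(struct cpuinfo_mips, asid_mask) + t2);
}

int main(void)
{
	unsigned long entryhi = 0x12345;

	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d asid %#lx\n", cpu,
		       entryhi & load_asid_mask(cpu));	/* and k0, k0, t2 */
	return 0;
}

Reusing the cpu-index-times-4 value already sitting in t2 is why the multiplier is CPUINFO_SIZE/4 rather than CPUINFO_SIZE.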
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index e223cb3d9..44da5259f 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -56,6 +56,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
 	{NULL}
 };
@@ -1088,7 +1089,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_MIPS_FPU:
-		r = !!cpu_has_fpu;
+		/* We don't handle systems with inconsistent cpu_has_fpu */
+		r = !!raw_cpu_has_fpu;
 		break;
 	case KVM_CAP_MIPS_MSA:
 		/*
@@ -1564,8 +1566,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 
 		/* Disable MSA & FPU */
 		disable_msa();
-		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
 			clear_c0_status(ST0_CU1 | ST0_FR);
+			disable_fpu_hazard();
+		}
 		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
 	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
 		set_c0_status(ST0_CU1);
@@ -1576,6 +1580,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 
 		/* Disable FPU */
 		clear_c0_status(ST0_CU1 | ST0_FR);
+		disable_fpu_hazard();
 	}
 	preempt_enable();
 }
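In mips.c, KVM_CAP_MIPS_FPU now reports raw_cpu_has_fpu so the answer does not depend on which CPU services the ioctl, and kvm_lose_fpu() gains disable_fpu_hazard() barriers after clearing Status.CU1, so that later instructions cannot execute while the FPU disable is still in flight. A minimal standalone model of the flag handling follows (hypothetical stand-ins for the CP0 accessors; the real barrier is an ehb-style execution hazard barrier, modelled here only as a placeholder):

#include <stdio.h>

#define KVM_MIPS_FPU_FPU 0x1
#define KVM_MIPS_FPU_MSA 0x2
#define ST0_CU1 (1u << 29)

static unsigned int fpu_inuse;		/* models vcpu->arch.fpu_inuse */
static unsigned int c0_status;		/* models the CP0 Status register */

static void clear_c0_status(unsigned int bits) { c0_status &= ~bits; }

static void disable_fpu_hazard(void)
{
	/* On real hardware this is an execution-hazard barrier; the point
	 * is its position: it must follow the CU1 clear, before any code
	 * that assumes the FPU is off. Placeholder in this sketch. */
}

static void lose_fpu(void)
{
	if (fpu_inuse & KVM_MIPS_FPU_FPU) {
		/* ...guest FPU context would be saved here... */
		clear_c0_status(ST0_CU1);
		disable_fpu_hazard();		/* barrier added by the patch */
		fpu_inuse &= ~KVM_MIPS_FPU_FPU;
	}
}

int main(void)
{
	c0_status = ST0_CU1;
	fpu_inuse = KVM_MIPS_FPU_FPU;
	lose_fpu();
	printf("fpu_inuse=%#x status=%#x\n", fpu_inuse, c0_status);
	return 0;
}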
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index e0e1d0a61..ed021ae78 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -49,12 +49,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_kernel_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_user_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	kvm_info("HOST TLBs:\n");
-	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+		 cpu_asid_mask(&current_cpu_data));
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -268,6 +275,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 	int even;
 	struct kvm *kvm = vcpu->kvm;
 	const int flush_dcache_mask = 0;
+	int ret;
 
 	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
 		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
@@ -299,14 +307,18 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 		pfn1 = kvm->arch.guest_pmap[gfn];
 	}
 
-	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
 		   (1 << 2) | (0x1 << 1);
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 		   (1 << 2) | (0x1 << 1);
 
-	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				       flush_dcache_mask);
+	preempt_disable();
+	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				      flush_dcache_mask);
+	preempt_enable();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
 
@@ -361,6 +373,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	struct kvm *kvm = vcpu->kvm;
 	kvm_pfn_t pfn0, pfn1;
+	int ret;
 
 	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
 		pfn0 = 0;
@@ -387,9 +400,6 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 		*hpa1 = pfn1 << PAGE_SHIFT;
 
 	/* Get attributes from the Guest TLB */
-	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-		  kvm_mips_get_kernel_asid(vcpu) :
-		  kvm_mips_get_user_asid(vcpu));
 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
 		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
@@ -398,8 +408,15 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
 
-	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				       tlb->tlb_mask);
+	preempt_disable();
+	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+		   kvm_mips_get_kernel_asid(vcpu) :
+		   kvm_mips_get_user_asid(vcpu));
+	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				      tlb->tlb_mask);
+	preempt_enable();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
 
@@ -564,15 +581,15 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
+	asid += cpu_asid_inc();
+	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 
 		kvm_local_flush_tlb_all();      /* start new asid cycle */
 
 		if (!asid)      /* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -627,6 +644,7 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
 	int newasid = 0;
 
@@ -637,7 +655,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	local_irq_save(flags);
 
 	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-						ASID_VERSION_MASK) {
+						asid_version_mask(cpu)) {
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +690,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			write_c0_entryhi(vcpu->arch.
-					 preempt_entryhi & ASID_MASK);
+					 preempt_entryhi & asid_mask);
 			ehb();
 		}
 	} else {
@@ -687,11 +705,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
 				write_c0_entryhi(vcpu->arch.
 						 guest_kernel_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			else
 				write_c0_entryhi(vcpu->arch.
 						 guest_user_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			ehb();
 		}
 	}
@@ -721,7 +739,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_mips_callbacks->vcpu_get_regs(vcpu);
 
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     ASID_VERSION_MASK)) {
+	     asid_version_mask(cpu))) {
 		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
 			  cpu_context(cpu, current->mm));
 		drop_mmu_context(current->mm, cpu);
@@ -748,7 +766,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 		inst = *(opc);
 	} else {
 		vpn2 = (unsigned long) opc & VPN2_MASK;
-		asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+		asid = kvm_read_c0_guest_entryhi(cop0) &
+			KVM_ENTRYHI_ASID;
 		index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
 		if (index < 0) {
 			kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index caa5ea103..6ba0fafce 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -505,7 +505,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 	kvm_write_c0_guest_intctl(cop0, 0xFC000000);
 
 	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
-	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
+				       (vcpu_id & MIPS_EBASE_CPUNUM));
 
 	return 0;
 }
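The tlb.c hunks share one idea: with CONFIG_MIPS_ASID_BITS_VARIABLE the ASID mask becomes a per-CPU property (cpu_asid_mask(&cpu_data[cpu])), so reading a guest ASID and writing the host TLB must happen on the same CPU. The entryhi computation therefore moves inside a preempt_disable()/preempt_enable() pair next to kvm_mips_host_tlb_write(). A minimal standalone model of that pattern follows (hypothetical helpers, not the kernel API):

#include <stdio.h>

/* Hypothetical per-CPU state; a real smp_processor_id() is only stable
 * while preemption is disabled, which is what the patch enforces. */
static int current_cpu;
static unsigned long cpu_asid[2] = { 0x42, 0x17 };

static void preempt_disable(void) { /* pins the task to current_cpu */ }
static void preempt_enable(void)  { /* migration may happen afterwards */ }

static unsigned long get_kernel_asid(void)
{
	return cpu_asid[current_cpu];
}

static int host_tlb_write(unsigned long entryhi)
{
	printf("cpu%d: write entryhi %#lx\n", current_cpu, entryhi);
	return 0;
}

static int handle_tlb_fault(unsigned long vaddr)
{
	unsigned long entryhi;
	int ret;

	preempt_disable();
	/* Both the ASID lookup and the TLB write now observe the same CPU;
	 * before the patch, a migration between the two could pair CPU A's
	 * ASID with a TLB write on CPU B. */
	entryhi = vaddr | get_kernel_asid();
	ret = host_tlb_write(entryhi);
	preempt_enable();

	return ret;
}

int main(void)
{
	current_cpu = 0;
	handle_tlb_fault(0x40000000);
	return 0;
}

The trap_emul.c change at the end is independent cleanup in the same spirit: the vcpu id written into guest EBase is masked with the named MIPS_EBASE_CPUNUM field definition instead of a bare 0xFF.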