Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r--   arch/powerpc/kvm/book3s_pr.c | 25 ++++++++-----------------
1 file changed, 8 insertions(+), 17 deletions(-)
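
No commit message survives in this capture, so the following is a hedged summary of the
diff below rather than the author's own description: kvmppc_visible_gpa() is changed to
return bool instead of int, the FP and Altivec preload paths gain matching
disable_kernel_fp()/disable_kernel_altivec() calls once the guest register state has been
loaded, and the open-coded giveup_fpu()/giveup_altivec()/__giveup_vsx() sequence in
kvmppc_vcpu_run_pr() is collapsed into a single giveup_all() call.

A minimal sketch of the resulting enable/load/disable pattern, for illustration only
(the function name and exact header set are assumptions, not part of the patch):

	/*
	 * Sketch only: mirrors the post-patch pattern used when preloading
	 * guest FP state on the host CPU.  Header choices are approximate.
	 */
	#include <linux/preempt.h>
	#include <linux/kvm_host.h>
	#include <asm/switch_to.h>

	static void sketch_preload_guest_fp(struct kvm_vcpu *vcpu)
	{
		preempt_disable();
		enable_kernel_fp();            /* allow kernel use of the FP unit */
		load_fp_state(&vcpu->arch.fp); /* load the guest's FP register file */
		disable_kernel_fp();           /* end the kernel's temporary FP use */
		preempt_enable();
	}
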
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 64891b081..95bceca8f 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -512,7 +512,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
put_page(hpage);
}
-static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
@@ -521,7 +521,7 @@ static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
gpa &= ~0xFFFULL;
if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
- return 1;
+ return true;
}
return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
preempt_disable();
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
+ disable_kernel_fp();
t->fp_save_area = &vcpu->arch.fp;
preempt_enable();
}
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
preempt_disable();
enable_kernel_altivec();
load_vr_state(&vcpu->arch.vr);
+ disable_kernel_altivec();
t->vr_save_area = &vcpu->arch.vr;
preempt_enable();
#endif
@@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
preempt_disable();
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
+ disable_kernel_fp();
preempt_enable();
}
#ifdef CONFIG_ALTIVEC
@@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
preempt_disable();
enable_kernel_altivec();
load_vr_state(&vcpu->arch.vr);
+ disable_kernel_altivec();
preempt_enable();
}
#endif
@@ -1486,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
goto out;
/* interrupts now hard-disabled */
- /* Save FPU state in thread_struct */
- if (current->thread.regs->msr & MSR_FP)
- giveup_fpu(current);
-
-#ifdef CONFIG_ALTIVEC
- /* Save Altivec state in thread_struct */
- if (current->thread.regs->msr & MSR_VEC)
- giveup_altivec(current);
-#endif
-
-#ifdef CONFIG_VSX
- /* Save VSX state in thread_struct */
- if (current->thread.regs->msr & MSR_VSX)
- __giveup_vsx(current);
-#endif
+ /* Save FPU, Altivec and VSX state */
+ giveup_all(current);
/* Preload FPU if it's enabled */
if (kvmppc_get_msr(vcpu) & MSR_FP)