Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--   arch/arm64/kvm/Kconfig                 11
-rw-r--r--   arch/arm64/kvm/Makefile                 2
-rw-r--r--   arch/arm64/kvm/debug.c                217
-rw-r--r--   arch/arm64/kvm/guest.c                 43
-rw-r--r--   arch/arm64/kvm/handle_exit.c           44
-rw-r--r--   arch/arm64/kvm/hyp.S                  647
-rw-r--r--   arch/arm64/kvm/reset.c                 20
-rw-r--r--   arch/arm64/kvm/sys_regs.c             298
-rw-r--r--   arch/arm64/kvm/sys_regs.h               6
-rw-r--r--   arch/arm64/kvm/sys_regs_generic_v8.c    2
-rw-r--r--   arch/arm64/kvm/trace.h                123
11 files changed, 951 insertions, 462 deletions
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bfffe8f4b..5c7e920e4 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -41,15 +41,4 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
-config KVM_ARM_MAX_VCPUS
- int "Number maximum supported virtual CPUs per VM"
- depends on KVM_ARM_HOST
- default 4
- help
- Static number of max supported virtual CPUs per VM.
-
- If you choose a high number, the vcpu structures will be quite
- large, so only choose a reasonable number that you expect to
- actually use.
-
endif # VIRTUALIZATION
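
With the build-time vcpu cap removed, userspace should discover the limit at run time instead. A minimal sketch (not part of this patch) using the standard KVM_CHECK_EXTENSION interface:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            /* KVM_CAP_MAX_VCPUS reports the per-VM vcpu limit at run time */
            int max = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);

            printf("max vcpus per VM: %d\n", max);
            return 0;
    }
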
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index f90f4aa7f..1949fe5f5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
-kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
new file mode 100644
index 000000000..47e5f0fea
--- /dev/null
+++ b/arch/arm64/kvm/debug.c
@@ -0,0 +1,217 @@
+/*
+ * Debug and Guest Debug support
+ *
+ * Copyright (C) 2015 - Linaro Ltd
+ * Author: Alex Bennée <alex.bennee@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+
+#include "trace.h"
+
+/* These are the bits of MDSCR_EL1 we may manipulate */
+#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \
+ DBG_MDSCR_KDE | \
+ DBG_MDSCR_MDE)
+
+static DEFINE_PER_CPU(u32, mdcr_el2);
+
+/**
+ * save/restore_guest_debug_regs
+ *
+ * For some debug operations we need to tweak some guest registers. As
+ * a result we need to save the state of those registers before we
+ * make those modifications.
+ *
+ * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
+ * after we have restored the preserved value to the main context.
+ */
+static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
+
+ trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
+ vcpu->arch.guest_debug_preserved.mdscr_el1);
+}
+
+static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+ vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
+
+ trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
+ vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+/**
+ * kvm_arm_init_debug - grab what we need for debug
+ *
+ * Currently the sole task of this function is to retrieve the initial
+ * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
+ * presumably been set up by some knowledgeable bootcode.
+ *
+ * It is called once per-cpu during CPU hyp initialisation.
+ */
+
+void kvm_arm_init_debug(void)
+{
+ __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+}
+
+/**
+ * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ */
+
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
+}
+
+/**
+ * kvm_arm_setup_debug - set up debug related stuff
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * This is called before each entry into the hypervisor to set up any
+ * debug-related registers. Currently this just ensures we will trap
+ * access to:
+ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ * - Debug ROM Address (MDCR_EL2_TDRA)
+ * - OS related registers (MDCR_EL2_TDOSA)
+ *
+ * Additionally, KVM only traps guest accesses to the debug registers if
+ * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+ * flag on vcpu->arch.debug_flags). Since the guest must not interfere
+ * with the hardware state when debugging the guest, we must ensure that
+ * trapping is enabled whenever we are debugging the guest using the
+ * debug registers.
+ */
+
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+{
+ bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+
+ trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+
+ vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+ MDCR_EL2_TPMCR |
+ MDCR_EL2_TDRA |
+ MDCR_EL2_TDOSA);
+
+ /* Is Guest debugging in effect? */
+ if (vcpu->guest_debug) {
+ /* Route all software debug exceptions to EL2 */
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+ /* Save guest debug state */
+ save_guest_debug_regs(vcpu);
+
+ /*
+ * Single Step (ARM ARM D2.12.3 The software step state
+ * machine)
+ *
+ * If we are doing Single Step we need to manipulate
+ * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
+ * step has occurred the hypervisor will trap the
+ * debug exception and we return to userspace.
+ *
+ * If the guest attempts to single step its userspace
+ * we would have to deal with a trapped exception
+ * while in the guest kernel. Because this would be
+ * hard to unwind we suppress the guest's ability to
+ * do so by masking MDSCR_EL1.SS.
+ *
+ * This confuses guest debuggers which use
+ * single-step behind the scenes but everything
+ * returns to normal once the host is no longer
+ * debugging the system.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+ vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
+ } else {
+ vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
+ }
+
+ trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
+
+ /*
+ * HW Breakpoints and watchpoints
+ *
+ * We simply switch the debug_ptr to point to our new
+ * external_debug_state which has been populated by the
+ * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
+ * mechanism ensures the registers are updated on the
+ * world switch.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ /* Enable breakpoints/watchpoints */
+ vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
+
+ vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ trap_debug = true;
+
+ trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ &vcpu->arch.debug_ptr->dbg_bcr[0],
+ &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+ trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+ &vcpu->arch.debug_ptr->dbg_wcr[0],
+ &vcpu->arch.debug_ptr->dbg_wvr[0]);
+ }
+ }
+
+ BUG_ON(!vcpu->guest_debug &&
+ vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+
+ /* Trap debug register access */
+ if (trap_debug)
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+ trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+ trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
+{
+ trace_kvm_arm_clear_debug(vcpu->guest_debug);
+
+ if (vcpu->guest_debug) {
+ restore_guest_debug_regs(vcpu);
+
+ /*
+ * If we were using HW debug we need to restore the
+ * debug_ptr to the guest debug state.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ kvm_arm_reset_debug_ptr(vcpu);
+
+ trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ &vcpu->arch.debug_ptr->dbg_bcr[0],
+ &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+ trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+ &vcpu->arch.debug_ptr->dbg_wcr[0],
+ &vcpu->arch.debug_ptr->dbg_wvr[0]);
+ }
+ }
+}
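
The single-step plumbing above is driven from userspace through the generic KVM_SET_GUEST_DEBUG ioctl. A hedged fragment, assuming vcpu_fd and the mmap'd run structure come from the usual KVM setup:

    struct kvm_guest_debug dbg = {
            .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
    };

    if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
            perror("KVM_SET_GUEST_DEBUG");

    /* with MDSCR_EL1.SS and PSTATE.SS set by kvm_arm_setup_debug(),
     * each KVM_RUN retires one instruction and then exits */
    ioctl(vcpu_fd, KVM_RUN, 0);
    assert(run->exit_reason == KVM_EXIT_DEBUG);
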
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9535bd555..d250160d3 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -32,6 +32,8 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include "trace.h"
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
@@ -293,7 +295,8 @@ int __attribute_const__ kvm_target_cpu(void)
break;
};
- return -EINVAL;
+ /* Return a default generic target */
+ return KVM_ARM_TARGET_GENERIC_V8;
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
@@ -331,3 +334,41 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
return -EINVAL;
}
+
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
+ KVM_GUESTDBG_USE_SW_BP | \
+ KVM_GUESTDBG_USE_HW | \
+ KVM_GUESTDBG_SINGLESTEP)
+
+/**
+ * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
+ * @vcpu: the vcpu pointer
+ * @dbg: the kvm_guest_debug structure from userspace
+ *
+ * This sets up and enables the VM for guest debugging. Userspace
+ * passes in a control flag to enable different debug types and
+ * potentially other architecture specific information in the rest of
+ * the structure.
+ */
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ trace_kvm_set_guest_debug(vcpu, dbg->control);
+
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+ return -EINVAL;
+
+ if (dbg->control & KVM_GUESTDBG_ENABLE) {
+ vcpu->guest_debug = dbg->control;
+
+ /* Hardware assisted Break and Watch points */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ vcpu->arch.external_debug_state = dbg->arch;
+ }
+
+ } else {
+ /* If not enabled clear all flags */
+ vcpu->guest_debug = 0;
+ }
+ return 0;
+}
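
The dbg->arch copy above is what carries hardware break/watchpoints into external_debug_state. An illustrative fragment setting one breakpoint (breakpoint_addr is assumed; the DBGBCR field values follow the ARM ARM encoding, BAS=0b1111, PMC=0b11, E=1, but check the manual before relying on them):

    struct kvm_guest_debug dbg = {
            .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW,
    };

    dbg.arch.dbg_bvr[0] = breakpoint_addr;               /* address to match */
    dbg.arch.dbg_bcr[0] = (0xf << 5) | (0x3 << 1) | 0x1;

    ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);           /* handled above */
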
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 524fa2567..68a0759b1 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -82,6 +82,45 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+/**
+ * kvm_handle_guest_debug - handle a debug exception
+ *
+ * @vcpu: the vcpu pointer
+ * @run: access to the kvm_run structure for results
+ *
+ * We route all debug exceptions through the same handler. If both the
+ * guest and host are using the same debug facilities it will be up to
+ * userspace to re-inject the correct exception for guest delivery.
+ *
+ * @return: 0 (while setting run->exit_reason), -1 for error
+ */
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int ret = 0;
+
+ run->exit_reason = KVM_EXIT_DEBUG;
+ run->debug.arch.hsr = hsr;
+
+ switch (hsr >> ESR_ELx_EC_SHIFT) {
+ case ESR_ELx_EC_WATCHPT_LOW:
+ run->debug.arch.far = vcpu->arch.fault.far_el2;
+ /* fall through */
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_BKPT32:
+ case ESR_ELx_EC_BRK64:
+ break;
+ default:
+ kvm_err("%s: un-handled case hsr: %#08x\n",
+ __func__, (unsigned int) hsr);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
@@ -96,6 +135,11 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
+ [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
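
On the userspace side a KVM_EXIT_DEBUG exit carries the raw HSR, so it can be decoded the same way kvm_handle_guest_debug() does. A sketch using the architectural EC values (the report_* callbacks are hypothetical):

    __u32 hsr = run->debug.arch.hsr;

    switch (hsr >> 26) {            /* ESR_ELx_EC_SHIFT */
    case 0x32:                      /* SOFTSTP_LOW: a single step completed */
            report_step(run);
            break;
    case 0x30:                      /* BREAKPT_LOW: HW breakpoint hit */
    case 0x34:                      /* WATCHPT_LOW: far holds the address */
            report_hw_debug(run);
            break;
    case 0x38:                      /* BKPT32 */
    case 0x3c:                      /* BRK64 */
            report_sw_bkpt(run);
            break;
    }
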
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3c6051cbf..e5836138e 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -230,199 +230,52 @@
stp x24, x25, [x3, #160]
.endm
-.macro save_debug
- // x2: base address for cpu context
- // x3: tmp register
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- mrs x20, dbgbcr15_el1
- mrs x19, dbgbcr14_el1
- mrs x18, dbgbcr13_el1
- mrs x17, dbgbcr12_el1
- mrs x16, dbgbcr11_el1
- mrs x15, dbgbcr10_el1
- mrs x14, dbgbcr9_el1
- mrs x13, dbgbcr8_el1
- mrs x12, dbgbcr7_el1
- mrs x11, dbgbcr6_el1
- mrs x10, dbgbcr5_el1
- mrs x9, dbgbcr4_el1
- mrs x8, dbgbcr3_el1
- mrs x7, dbgbcr2_el1
- mrs x6, dbgbcr1_el1
- mrs x5, dbgbcr0_el1
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- mrs x20, dbgbvr15_el1
- mrs x19, dbgbvr14_el1
- mrs x18, dbgbvr13_el1
- mrs x17, dbgbvr12_el1
- mrs x16, dbgbvr11_el1
- mrs x15, dbgbvr10_el1
- mrs x14, dbgbvr9_el1
- mrs x13, dbgbvr8_el1
- mrs x12, dbgbvr7_el1
- mrs x11, dbgbvr6_el1
- mrs x10, dbgbvr5_el1
- mrs x9, dbgbvr4_el1
- mrs x8, dbgbvr3_el1
- mrs x7, dbgbvr2_el1
- mrs x6, dbgbvr1_el1
- mrs x5, dbgbvr0_el1
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- mrs x20, dbgwcr15_el1
- mrs x19, dbgwcr14_el1
- mrs x18, dbgwcr13_el1
- mrs x17, dbgwcr12_el1
- mrs x16, dbgwcr11_el1
- mrs x15, dbgwcr10_el1
- mrs x14, dbgwcr9_el1
- mrs x13, dbgwcr8_el1
- mrs x12, dbgwcr7_el1
- mrs x11, dbgwcr6_el1
- mrs x10, dbgwcr5_el1
- mrs x9, dbgwcr4_el1
- mrs x8, dbgwcr3_el1
- mrs x7, dbgwcr2_el1
- mrs x6, dbgwcr1_el1
- mrs x5, dbgwcr0_el1
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-
+.macro save_debug type
+ // x4: pointer to register set
+ // x5: number of registers to skip
+ // x6..x22 trashed
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
+ mrs x21, \type\()15_el1
+ mrs x20, \type\()14_el1
+ mrs x19, \type\()13_el1
+ mrs x18, \type\()12_el1
+ mrs x17, \type\()11_el1
+ mrs x16, \type\()10_el1
+ mrs x15, \type\()9_el1
+ mrs x14, \type\()8_el1
+ mrs x13, \type\()7_el1
+ mrs x12, \type\()6_el1
+ mrs x11, \type\()5_el1
+ mrs x10, \type\()4_el1
+ mrs x9, \type\()3_el1
+ mrs x8, \type\()2_el1
+ mrs x7, \type\()1_el1
+ mrs x6, \type\()0_el1
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- mrs x20, dbgwvr15_el1
- mrs x19, dbgwvr14_el1
- mrs x18, dbgwvr13_el1
- mrs x17, dbgwvr12_el1
- mrs x16, dbgwvr11_el1
- mrs x15, dbgwvr10_el1
- mrs x14, dbgwvr9_el1
- mrs x13, dbgwvr8_el1
- mrs x12, dbgwvr7_el1
- mrs x11, dbgwvr6_el1
- mrs x10, dbgwvr5_el1
- mrs x9, dbgwvr4_el1
- mrs x8, dbgwvr3_el1
- mrs x7, dbgwvr2_el1
- mrs x6, dbgwvr1_el1
- mrs x5, dbgwvr0_el1
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- mrs x21, mdccint_el1
- str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ str x21, [x4, #(15 * 8)]
+ str x20, [x4, #(14 * 8)]
+ str x19, [x4, #(13 * 8)]
+ str x18, [x4, #(12 * 8)]
+ str x17, [x4, #(11 * 8)]
+ str x16, [x4, #(10 * 8)]
+ str x15, [x4, #(9 * 8)]
+ str x14, [x4, #(8 * 8)]
+ str x13, [x4, #(7 * 8)]
+ str x12, [x4, #(6 * 8)]
+ str x11, [x4, #(5 * 8)]
+ str x10, [x4, #(4 * 8)]
+ str x9, [x4, #(3 * 8)]
+ str x8, [x4, #(2 * 8)]
+ str x7, [x4, #(1 * 8)]
+ str x6, [x4, #(0 * 8)]
.endm
.macro restore_sysregs
@@ -467,195 +320,52 @@
msr mdscr_el1, x25
.endm
-.macro restore_debug
- // x2: base address for cpu context
- // x3: tmp register
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+.macro restore_debug type
+ // x4: pointer to register set
+ // x5: number of registers to skip
+ // x6..x22 trashed
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- msr dbgbcr15_el1, x20
- msr dbgbcr14_el1, x19
- msr dbgbcr13_el1, x18
- msr dbgbcr12_el1, x17
- msr dbgbcr11_el1, x16
- msr dbgbcr10_el1, x15
- msr dbgbcr9_el1, x14
- msr dbgbcr8_el1, x13
- msr dbgbcr7_el1, x12
- msr dbgbcr6_el1, x11
- msr dbgbcr5_el1, x10
- msr dbgbcr4_el1, x9
- msr dbgbcr3_el1, x8
- msr dbgbcr2_el1, x7
- msr dbgbcr1_el1, x6
- msr dbgbcr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- msr dbgbvr15_el1, x20
- msr dbgbvr14_el1, x19
- msr dbgbvr13_el1, x18
- msr dbgbvr12_el1, x17
- msr dbgbvr11_el1, x16
- msr dbgbvr10_el1, x15
- msr dbgbvr9_el1, x14
- msr dbgbvr8_el1, x13
- msr dbgbvr7_el1, x12
- msr dbgbvr6_el1, x11
- msr dbgbvr5_el1, x10
- msr dbgbvr4_el1, x9
- msr dbgbvr3_el1, x8
- msr dbgbvr2_el1, x7
- msr dbgbvr1_el1, x6
- msr dbgbvr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
+ ldr x21, [x4, #(15 * 8)]
+ ldr x20, [x4, #(14 * 8)]
+ ldr x19, [x4, #(13 * 8)]
+ ldr x18, [x4, #(12 * 8)]
+ ldr x17, [x4, #(11 * 8)]
+ ldr x16, [x4, #(10 * 8)]
+ ldr x15, [x4, #(9 * 8)]
+ ldr x14, [x4, #(8 * 8)]
+ ldr x13, [x4, #(7 * 8)]
+ ldr x12, [x4, #(6 * 8)]
+ ldr x11, [x4, #(5 * 8)]
+ ldr x10, [x4, #(4 * 8)]
+ ldr x9, [x4, #(3 * 8)]
+ ldr x8, [x4, #(2 * 8)]
+ ldr x7, [x4, #(1 * 8)]
+ ldr x6, [x4, #(0 * 8)]
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- msr dbgwcr15_el1, x20
- msr dbgwcr14_el1, x19
- msr dbgwcr13_el1, x18
- msr dbgwcr12_el1, x17
- msr dbgwcr11_el1, x16
- msr dbgwcr10_el1, x15
- msr dbgwcr9_el1, x14
- msr dbgwcr8_el1, x13
- msr dbgwcr7_el1, x12
- msr dbgwcr6_el1, x11
- msr dbgwcr5_el1, x10
- msr dbgwcr4_el1, x9
- msr dbgwcr3_el1, x8
- msr dbgwcr2_el1, x7
- msr dbgwcr1_el1, x6
- msr dbgwcr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- msr dbgwvr15_el1, x20
- msr dbgwvr14_el1, x19
- msr dbgwvr13_el1, x18
- msr dbgwvr12_el1, x17
- msr dbgwvr11_el1, x16
- msr dbgwvr10_el1, x15
- msr dbgwvr9_el1, x14
- msr dbgwvr8_el1, x13
- msr dbgwvr7_el1, x12
- msr dbgwvr6_el1, x11
- msr dbgwvr5_el1, x10
- msr dbgwvr4_el1, x9
- msr dbgwvr3_el1, x8
- msr dbgwvr2_el1, x7
- msr dbgwvr1_el1, x6
- msr dbgwvr0_el1, x5
-
- ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
- msr mdccint_el1, x21
+ msr \type\()15_el1, x21
+ msr \type\()14_el1, x20
+ msr \type\()13_el1, x19
+ msr \type\()12_el1, x18
+ msr \type\()11_el1, x17
+ msr \type\()10_el1, x16
+ msr \type\()9_el1, x15
+ msr \type\()8_el1, x14
+ msr \type\()7_el1, x13
+ msr \type\()6_el1, x12
+ msr \type\()5_el1, x11
+ msr \type\()4_el1, x10
+ msr \type\()3_el1, x9
+ msr \type\()2_el1, x8
+ msr \type\()1_el1, x7
+ msr \type\()0_el1, x6
.endm
.macro skip_32bit_state tmp, target
@@ -675,6 +385,14 @@
tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm
+/*
+ * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
+ */
+.macro skip_fpsimd_state tmp, target
+ mrs \tmp, cptr_el2
+ tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target
+.endm
+
.macro compute_debug_state target
// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
// is set, we do a full save/restore cycle and disable trapping.
@@ -713,20 +431,15 @@
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
- mrs x6, fpexc32_el2
stp x4, x5, [x3]
- str x6, [x3, #16]
- skip_debug_state x8, 2f
+ skip_fpsimd_state x8, 2f
+ mrs x6, fpexc32_el2
+ str x6, [x3, #16]
+2:
+ skip_debug_state x8, 1f
mrs x7, dbgvcr32_el2
str x7, [x3, #24]
-2:
- skip_tee_state x8, 1f
-
- add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
- mrs x4, teecr32_el1
- mrs x5, teehbr32_el1
- stp x4, x5, [x3]
1:
.endm
@@ -743,51 +456,46 @@
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
- ldr x6, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
- msr fpexc32_el2, x6
- skip_debug_state x8, 2f
+ skip_debug_state x8, 1f
ldr x7, [x3, #24]
msr dbgvcr32_el2, x7
-2:
- skip_tee_state x8, 1f
-
- add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
- ldp x4, x5, [x3]
- msr teecr32_el1, x4
- msr teehbr32_el1, x5
1:
.endm
.macro activate_traps
ldr x2, [x0, #VCPU_HCR_EL2]
+
+ /*
+ * We are about to set CPTR_EL2.TFP to trap all floating point
+ * register accesses to EL2, however, the ARM ARM clearly states that
+ * traps are only taken to EL2 if the operation would not otherwise
+ * trap to EL1. Therefore, always make sure that for 32-bit guests,
+ * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+ */
+ tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
+ mov x3, #(1 << 30)
+ msr fpexc32_el2, x3
+ isb
+99:
msr hcr_el2, x2
mov x2, #CPTR_EL2_TTA
+ orr x2, x2, #CPTR_EL2_TFP
msr cptr_el2, x2
mov x2, #(1 << 15) // Trap CP15 Cr=15
msr hstr_el2, x2
- mrs x2, mdcr_el2
- and x2, x2, #MDCR_EL2_HPMN_MASK
- orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
- orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
-
- // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
- // if not dirty.
- ldr x3, [x0, #VCPU_DEBUG_FLAGS]
- tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
- orr x2, x2, #MDCR_EL2_TDA
-1:
+ // Monitor Debug Config - see kvm_arm_setup_debug()
+ ldr x2, [x0, #VCPU_MDCR_EL2]
msr mdcr_el2, x2
.endm
.macro deactivate_traps
mov x2, #HCR_RW
msr hcr_el2, x2
- msr cptr_el2, xzr
msr hstr_el2, xzr
mrs x2, mdcr_el2
@@ -810,7 +518,11 @@
* Call into the vgic backend for state saving
*/
.macro save_vgic_state
- alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ bl __save_vgic_v2_state
+alternative_else
+ bl __save_vgic_v3_state
+alternative_endif
mrs x24, hcr_el2
mov x25, #HCR_INT_OVERRIDE
neg x25, x25
@@ -827,7 +539,11 @@
orr x24, x24, #HCR_INT_OVERRIDE
orr x24, x24, x25
msr hcr_el2, x24
- alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ bl __restore_vgic_v2_state
+alternative_else
+ bl __restore_vgic_v3_state
+alternative_endif
.endm
.macro save_timer_state
@@ -893,21 +609,101 @@ __restore_sysregs:
restore_sysregs
ret
+/* Save debug state */
__save_debug:
- save_debug
+ // x2: ptr to CPU context
+ // x3: ptr to debug reg struct
+ // x4/x5/x6-22/x24-26: trashed
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ mov x5, x24
+ add x4, x3, #DEBUG_BCR
+ save_debug dbgbcr
+ add x4, x3, #DEBUG_BVR
+ save_debug dbgbvr
+
+ mov x5, x25
+ add x4, x3, #DEBUG_WCR
+ save_debug dbgwcr
+ add x4, x3, #DEBUG_WVR
+ save_debug dbgwvr
+
+ mrs x21, mdccint_el1
+ str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
ret
+/* Restore debug state */
__restore_debug:
- restore_debug
+ // x2: ptr to CPU context
+ // x3: ptr to debug reg struct
+ // x4/x5/x6-22/x24-26: trashed
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ mov x5, x24
+ add x4, x3, #DEBUG_BCR
+ restore_debug dbgbcr
+ add x4, x3, #DEBUG_BVR
+ restore_debug dbgbvr
+
+ mov x5, x25
+ add x4, x3, #DEBUG_WCR
+ restore_debug dbgwcr
+ add x4, x3, #DEBUG_WVR
+ restore_debug dbgwvr
+
+ ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ msr mdccint_el1, x21
+
ret
__save_fpsimd:
+ skip_fpsimd_state x3, 1f
save_fpsimd
- ret
+1: ret
__restore_fpsimd:
+ skip_fpsimd_state x3, 1f
restore_fpsimd
- ret
+1: ret
+
+switch_to_guest_fpsimd:
+ push x4, lr
+
+ mrs x2, cptr_el2
+ bic x2, x2, #CPTR_EL2_TFP
+ msr cptr_el2, x2
+ isb
+
+ mrs x0, tpidr_el2
+
+ ldr x2, [x0, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x2
+ bl __save_fpsimd
+
+ add x2, x0, #VCPU_CONTEXT
+ bl __restore_fpsimd
+
+ skip_32bit_state x3, 1f
+ ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
+ msr fpexc32_el2, x4
+1:
+ pop x4, lr
+ pop x2, x3
+ pop x0, x1
+
+ eret
/*
* u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -929,10 +725,10 @@ ENTRY(__kvm_vcpu_run)
kern_hyp_va x2
save_host_regs
- bl __save_fpsimd
bl __save_sysregs
compute_debug_state 1f
+ add x3, x0, #VCPU_HOST_DEBUG_STATE
bl __save_debug
1:
activate_traps
@@ -948,9 +744,10 @@ ENTRY(__kvm_vcpu_run)
// to Cortex-A57 erratum #852523.
restore_guest_32bit_state
bl __restore_sysregs
- bl __restore_fpsimd
skip_debug_state x3, 1f
+ ldr x3, [x0, #VCPU_DEBUG_PTR]
+ kern_hyp_va x3
bl __restore_debug
1:
restore_guest_regs
@@ -970,6 +767,8 @@ __kvm_vcpu_return:
bl __save_sysregs
skip_debug_state x3, 1f
+ ldr x3, [x0, #VCPU_DEBUG_PTR]
+ kern_hyp_va x3
bl __save_debug
1:
save_guest_32bit_state
@@ -986,12 +785,15 @@ __kvm_vcpu_return:
bl __restore_sysregs
bl __restore_fpsimd
+ /* Clear FPSIMD and Trace trapping */
+ msr cptr_el2, xzr
skip_debug_state x3, 1f
// Clear the dirty flag for the next run, as all the state has
// already been saved. Note that we nuke the whole 64bit word.
// If we ever add more flags, we'll have to be more careful...
str xzr, [x0, #VCPU_DEBUG_FLAGS]
+ add x3, x0, #VCPU_HOST_DEBUG_STATE
bl __restore_debug
1:
restore_host_regs
@@ -1194,6 +996,11 @@ el1_trap:
* x1: ESR
* x2: ESR_EC
*/
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x2, #ESR_ELx_EC_FP_ASIMD
+ b.eq switch_to_guest_fpsimd
+
cmp x2, #ESR_ELx_EC_DABT_LOW
mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
@@ -1288,4 +1095,10 @@ ENTRY(__kvm_hyp_vector)
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
+
+ENTRY(__kvm_get_mdcr_el2)
+ mrs x0, mdcr_el2
+ ret
+ENDPROC(__kvm_get_mdcr_el2)
+
.popsection
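
Besides the debug rework, the hunks above make FP/SIMD switching lazy: CPTR_EL2.TFP traps the guest's first FP access to EL2, and only then is host state saved and guest state loaded. A C rendering of that flow (every name here is a stand-in for the assembly, not kernel API):

    #define CPTR_EL2_TFP    (1UL << 10)     /* trap FP/SIMD accesses to EL2 */

    struct fpsimd_state { unsigned long vregs[2 * 32]; };

    static unsigned long cptr_el2;          /* stand-in for the system register */

    static void save_fpsimd(struct fpsimd_state *st)    { (void)st; /* stp q0..q31 */ }
    static void restore_fpsimd(struct fpsimd_state *st) { (void)st; /* ldp q0..q31 */ }

    /* invoked from el1_trap when ESR_EL2.EC == ESR_ELx_EC_FP_ASIMD */
    static void switch_to_guest_fpsimd(struct fpsimd_state *host,
                                       struct fpsimd_state *guest)
    {
            cptr_el2 &= ~CPTR_EL2_TFP;      /* later FP accesses run untrapped */
            save_fpsimd(host);              /* host FP state saved only on demand */
            restore_fpsimd(guest);
    }
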
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 0b4326578..91cf5350b 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
+#include <linux/hw_breakpoint.h>
#include <kvm/arm_arch_timer.h>
@@ -56,6 +57,12 @@ static bool cpu_has_32bit_el1(void)
return !!(pfr0 & 0x20);
}
+/**
+ * kvm_arch_dev_ioctl_check_extension
+ *
+ * We currently assume that the number of HW registers is uniform
+ * across all CPUs (see cpuinfo_sanity_check).
+ */
int kvm_arch_dev_ioctl_check_extension(long ext)
{
int r;
@@ -64,6 +71,15 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
case KVM_CAP_ARM_EL1_32BIT:
r = cpu_has_32bit_el1();
break;
+ case KVM_CAP_GUEST_DEBUG_HW_BPS:
+ r = get_num_brps();
+ break;
+ case KVM_CAP_GUEST_DEBUG_HW_WPS:
+ r = get_num_wrps();
+ break;
+ case KVM_CAP_SET_GUEST_DEBUG:
+ r = 1;
+ break;
default:
r = 0;
}
@@ -105,7 +121,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_reset_sys_regs(vcpu);
/* Reset timer */
- kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
- return 0;
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
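
Userspace pairs these with KVM_CHECK_EXTENSION so it never assumes all 16 architectural slots exist, since only get_num_brps()/get_num_wrps() of them are implemented. For instance:

    int nbps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_BPS);
    int nwps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_WPS);

    /* only program dbg_bvr[0..nbps-1] and dbg_wvr[0..nwps-1] */
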
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c370b4014..d03d3af17 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -38,6 +38,8 @@
#include "sys_regs.h"
+#include "trace.h"
+
/*
* All of this file is extremly similar to the ARM coproc.c, but the
* types are different. My gut feeling is that it should be pretty
@@ -208,9 +210,217 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
}
+ trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+
+ return true;
+}
+
+/*
+ * reg_to_dbg/dbg_to_reg
+ *
+ * A 32 bit write to a debug register leaves the top bits alone.
+ * A 32 bit read from a debug register only returns the bottom bits.
+ *
+ * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
+ * hyp.S code switches between host and guest values on future world
+ * switches.
+ */
+static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ u64 *dbg_reg)
+{
+ u64 val = *vcpu_reg(vcpu, p->Rt);
+
+ if (p->is_32bit) {
+ val &= 0xffffffffUL;
+ val |= ((*dbg_reg >> 32) << 32);
+ }
+
+ *dbg_reg = val;
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+}
+
+static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ u64 *dbg_reg)
+{
+ u64 val = *dbg_reg;
+
+ if (p->is_32bit)
+ val &= 0xffffffffUL;
+
+ *vcpu_reg(vcpu, p->Rt) = val;
+}
+
+static inline bool trap_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
return true;
}
+static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+
+ return true;
+}
+
+static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+}
+
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair;
@@ -240,16 +450,16 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
- trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
+ trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
/* DBGBCRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
- trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
+ trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
/* DBGWVRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
- trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
+ trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
/* DBGWCRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
- trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
/*
* Architected system registers.
@@ -329,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
trap_dbgauthstatus_el1 },
- /* TEECR32_EL1 */
- { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
- NULL, reset_val, TEECR32_EL1, 0 },
- /* TEEHBR32_EL1 */
- { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
- NULL, reset_val, TEEHBR32_EL1, 0 },
-
/* MDCCSR_EL1 */
{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
trap_raz_wi },
@@ -516,28 +719,57 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
return true;
}
-#define DBG_BCR_BVR_WCR_WVR(n) \
- /* DBGBVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
- NULL, (cp14_DBGBVR0 + (n) * 2) }, \
- /* DBGBCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
- NULL, (cp14_DBGBCR0 + (n) * 2) }, \
- /* DBGWVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
- NULL, (cp14_DBGWVR0 + (n) * 2) }, \
- /* DBGWCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
- NULL, (cp14_DBGWCR0 + (n) * 2) }
-
-#define DBGBXVR(n) \
- { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
- NULL, cp14_DBGBXVR0 + n * 2 }
+/* AArch32 debug register mappings
+ *
+ * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
+ * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
+ *
+ * All control registers and watchpoint value registers are mapped to
+ * the lower 32 bits of their AArch64 equivalents. We share the trap
+ * handlers with the above AArch64 code which checks what mode the
+ * system is in.
+ */
+
+static inline bool trap_xvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (p->is_write) {
+ u64 val = *dbg_reg;
+
+ val &= 0xffffffffUL;
+ val |= *vcpu_reg(vcpu, p->Rt) << 32;
+ *dbg_reg = val;
+
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+ }
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
+
+#define DBGBXVR(n) \
+ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
* Trapped cp14 registers. We generally ignore most of the external
* debug, on the principle that they don't really make sense to a
- * guest. Revisit this one day, whould this principle change.
+ * guest. Revisit this one day, would this principle change.
*/
static const struct sys_reg_desc cp14_regs[] = {
/* DBGIDR */
@@ -999,6 +1231,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ trace_kvm_handle_sys_reg(esr);
+
params.is_aarch32 = false;
params.is_32bit = false;
params.Op0 = (esr >> 20) & 3;
@@ -1303,6 +1537,9 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
+ if (r->get_user)
+ return (r->get_user)(vcpu, r, reg, uaddr);
+
return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
@@ -1321,6 +1558,9 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
+ if (r->set_user)
+ return (r->set_user)(vcpu, r, reg, uaddr);
+
return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
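
With get_user/set_user wired up, the guest debug registers are also reachable through the ONE_REG interface. A sketch reading DBGBVR0_EL1, using the encoding from the descriptor table above and the uapi ARM64_SYS_REG helper:

    __u64 val = 0;
    struct kvm_one_reg one = {
            /* DBGBVR0_EL1: Op0=2, Op1=0, CRn=0, CRm=0, Op2=4 */
            .id   = ARM64_SYS_REG(2, 0, 0, 0, 4),
            .addr = (__u64)&val,
    };

    /* dispatches to get_bvr() rather than the generic copy-out */
    ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
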
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index d411e2514..eaa324e4d 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -55,6 +55,12 @@ struct sys_reg_desc {
/* Value (usually reset value) */
u64 val;
+
+ /* Custom get/set_user functions, fallback to generic if NULL */
+ int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr);
+ int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr);
};
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 475fd2929..1e4576824 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -94,6 +94,8 @@ static int __init sys_reg_genericv8_init(void)
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
&genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_GENERIC_V8,
+ &genericv8_target_table);
return 0;
}
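
Together with the kvm_target_cpu() fallback in guest.c, registering the generic table lets a vcpu come up on unrecognised cores via the usual preferred-target handshake:

    struct kvm_vcpu_init init;

    /* on an unknown core this now yields KVM_ARM_TARGET_GENERIC_V8 */
    ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
    ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
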
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 157416e96..7fb0008c4 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -44,6 +44,129 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
+TRACE_EVENT(kvm_arm_setup_debug,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+ TP_ARGS(vcpu, guest_debug),
+
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_clear_debug,
+ TP_PROTO(__u32 guest_debug),
+ TP_ARGS(guest_debug),
+
+ TP_STRUCT__entry(
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("flags: 0x%08x", __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_set_dreg32,
+ TP_PROTO(const char *name, __u32 value),
+ TP_ARGS(name, value),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(__u32, value)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->value = value;
+ ),
+
+ TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+);
+
+TRACE_EVENT(kvm_arm_set_regset,
+ TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
+ TP_ARGS(type, len, control, value),
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, len)
+ __array(u64, ctrls, 16)
+ __array(u64, values, 16)
+ ),
+ TP_fast_assign(
+ __entry->name = type;
+ __entry->len = len;
+ memcpy(__entry->ctrls, control, len << 3);
+ memcpy(__entry->values, value, len << 3);
+ ),
+ TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
+ __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
+ __print_array(__entry->values, __entry->len, sizeof(__u64)))
+);
+
+TRACE_EVENT(trap_reg,
+ TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
+ TP_ARGS(fn, reg, is_write, write_value),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(int, reg)
+ __field(bool, is_write)
+ __field(u64, write_value)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->reg = reg;
+ __entry->is_write = is_write;
+ __entry->write_value = write_value;
+ ),
+
+ TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+);
+
+TRACE_EVENT(kvm_handle_sys_reg,
+ TP_PROTO(unsigned long hsr),
+ TP_ARGS(hsr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, hsr)
+ ),
+
+ TP_fast_assign(
+ __entry->hsr = hsr;
+ ),
+
+ TP_printk("HSR 0x%08lx", __entry->hsr)
+);
+
+TRACE_EVENT(kvm_set_guest_debug,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+ TP_ARGS(vcpu, guest_debug),
+
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+
#endif /* _TRACE_ARM64_KVM_H */
#undef TRACE_INCLUDE_PATH
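
The new events can be toggled at run time through tracefs. A hypothetical helper (the events/kvm/ path assumes TRACE_SYSTEM is "kvm" and tracefs mounted at /sys/kernel/tracing):

    #include <stdio.h>

    static int enable_event(const char *ev)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/kernel/tracing/events/kvm/%s/enable", ev);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs("1", f);
            fclose(f);
            return 0;
    }

    /* e.g. enable_event("kvm_arm_setup_debug"); */
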