author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-10-27 13:37:40 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-10-27 13:37:40 -0300
commit     badc0e42604c4465facd81d2323e98b3b1eb9188 (patch)
tree       e61811e7ddf70517f4b546d522ba3b630b38f50f /arch
parent     f19866e5cb04f8f007cb422132ea4611cdacc8e3 (diff)
Linux-libre 4.2.4-gnupck-4.2.4-gnu
Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/plat-axs10x/axs10x.c | 2
-rw-r--r--  arch/arm/Makefile | 8
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-rex.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap5-uevm.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 2
-rw-r--r--  arch/arm/kernel/kgdb.c | 8
-rw-r--r--  arch/arm/kernel/perf_event.c | 4
-rw-r--r--  arch/arm/kernel/signal.c | 15
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 6
-rw-r--r--  arch/arm/kvm/mmu.c | 6
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c | 27
-rw-r--r--  arch/arm/mach-exynos/regs-pmu.h | 6
-rw-r--r--  arch/arm/plat-pxa/ssp.c | 1
-rw-r--r--  arch/arm64/kernel/efi.c | 3
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 22
-rw-r--r--  arch/arm64/mm/fault.c | 1
-rw-r--r--  arch/m68k/include/asm/linkage.h | 30
-rw-r--r--  arch/mips/kernel/cps-vec.S | 12
-rw-r--r--  arch/mips/kernel/setup.c | 10
-rw-r--r--  arch/mips/loongson64/common/env.c | 3
-rw-r--r--  arch/mips/mm/dma-default.c | 2
-rw-r--r--  arch/mips/net/bpf_jit_asm.S | 50
-rw-r--r--  arch/powerpc/kvm/book3s.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/msi.c | 5
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 5
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 5
-rw-r--r--  arch/powerpc/sysdev/mpic_u3msi.c | 5
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_msi.c | 5
-rw-r--r--  arch/s390/boot/compressed/Makefile | 2
-rw-r--r--  arch/s390/kernel/compat_signal.c | 27
-rw-r--r--  arch/x86/entry/entry_64.S | 16
-rw-r--r--  arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  arch/x86/include/asm/preempt.h | 4
-rw-r--r--  arch/x86/include/asm/qspinlock.h | 19
-rw-r--r--  arch/x86/kernel/alternative.c | 5
-rw-r--r--  arch/x86/kernel/apic/apic.c | 7
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 5
-rw-r--r--  arch/x86/kernel/crash.c | 7
-rw-r--r--  arch/x86/kernel/paravirt.c | 16
-rw-r--r--  arch/x86/kernel/process_64.c | 52
-rw-r--r--  arch/x86/kernel/tsc.c | 17
-rw-r--r--  arch/x86/kvm/svm.c | 111
-rw-r--r--  arch/x86/kvm/vmx.c | 13
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/mm/init_64.c | 2
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 24
-rw-r--r--  arch/x86/platform/efi/efi.c | 67
-rw-r--r--  arch/x86/xen/enlighten.c | 23
53 files changed, 489 insertions, 198 deletions
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index e7769c3ab..ac79491ee 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -402,6 +402,8 @@ static void __init axs103_early_init(void)
unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
if (num_cores > 2)
arc_set_core_freq(50 * 1000000);
+ else if (num_cores == 2)
+ arc_set_core_freq(75 * 1000000);
#endif
switch (arc_get_core_freq()/1000000) {
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7451b447c..2c2b28ee4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -54,6 +54,14 @@ AS += -EL
LD += -EL
endif
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 534f27ceb..fa8107dec 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -1118,7 +1118,7 @@
interrupt-parent = <&combiner>;
interrupts = <3 0>;
clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+ clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
power-domains = <&disp_pd>;
#iommu-cells = <0>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 3373fd958..a50356243 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
compatible = "regulator-fixed";
reg = <1>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1>;
regulator-name = "usbh1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
compatible = "regulator-fixed";
reg = <2>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg>;
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a5474113c..67659a0ed 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@
tfp410_pins: pinmux_tfp410_pins {
pinctrl-single,pins = <
- 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
>;
};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 275618f19..5771a149c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@
i2c5_pins: pinmux_i2c5_pins {
pinctrl-single,pins = <
- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 6a63f30c9..f5f384c04 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -107,7 +107,7 @@
720000 1200000
528000 1100000
312000 1000000
- 144000 900000
+ 144000 1000000
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a6ad93c9b..fd9eefce0 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return err;
- patch_text((void *)bpt->bpt_addr,
- *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
return err;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
return 0;
}
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 54272e0be..7d5379c1c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -795,8 +795,10 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
- if (irq >= 0 && irq_is_percpu(irq))
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ cpumask_setall(&pmu->supported_cpus);
return 0;
+ }
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23..586eef262 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -343,12 +343,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
*/
thumb = handler & 1;
-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 6
/*
- * Clear the If-Then Thumb-2 execution state
- * ARM spec requires this to be all 000s in ARM mode
- * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
- * signal transition without this.
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to do this whenever we're on an ARMv6 or later CPU for
+ * simplicity.
*/
cpsr &= ~PSR_IT_MASK;
#endif
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 702740d37..51a59504b 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 )
mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb
mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7b4201294..6984342da 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf54795..56978199c 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
+#include <asm/smp_plat.h>
#include "regs-pmu.h"
#include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
- exynos_cpu_power_up(cpunr);
+ if (!exynos_cpu_power_state(cpunr)) {
+ exynos_cpu_power_up(cpunr);
+
+ /*
+ * This assumes the cluster number of the big cores(Cortex A15)
+ * is 0 and the Little cores(Cortex A7) is 1.
+ * When the system was booted from the Little core,
+ * they should be reset during power up cpu.
+ */
+ if (cluster &&
+ cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+ /*
+ * Before we reset the Little cores, we should wait
+ * the SPARE2 register is set to 1 because the init
+ * codes of the iROM will set the register after
+ * initialization.
+ */
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+
+ pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+ EXYNOS_SWRESET);
+ }
+ }
+
return 0;
}
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b7614333d..fba9068ed 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
#define SPREAD_ENABLE 0xF
#define SPREAD_USE_STANDWFI 0xF
+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
#define EXYNOS5420_BB_CON1 0x0784
#define EXYNOS5420_BB_SEL_EN BIT(31)
#define EXYNOS5420_BB_PMOS_EN BIT(7)
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ad9529cc4..daa1a65f2 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -107,7 +107,6 @@ static const struct of_device_id pxa_ssp_of_ids[] = {
{ .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
{ .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
{ .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
- { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
{ },
};
MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6eaed..13671a9cf 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
*/
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ !PAGE_ALIGNED(md->phys_addr))
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518..0f03a8fe2 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* save return value regs*/
+ .macro save_return_regs
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+ .endm
+
+ /* restore return value regs*/
+ .macro restore_return_regs
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+ .endm
+
/*
* void ftrace_graph_caller(void)
*
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
- str x0, [sp, #-16]!
+ save_return_regs
mov x0, x29 // parent's fp
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
mov x30, x0 // restore the original return address
- ldr x0, [sp], #16
+ restore_return_regs
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd1a..27c3e6fd2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -278,6 +278,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb79..066e74f66 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
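An aside on the asmlinkage_protect() macros added above: they mark stack-passed arguments as live for the whole function, so GCC neither reuses the caller-owned argument slots as temporaries nor converts the call into tail recursion. A minimal usage sketch (the function and argument names here are purely illustrative, not part of this patch):
asmlinkage long sys_example(unsigned long fd, unsigned long len)
{
	long ret = do_example(fd, len);
	/* keep the two stack arguments "used" until we return */
	asmlinkage_protect(2, ret, fd, len);
	return ret;
}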
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 9f71c06ae..209ded168 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -39,6 +39,7 @@
mfc0 \dest, CP0_CONFIG, 3
andi \dest, \dest, MIPS_CONF3_MT
beqz \dest, \nomt
+ nop
.endm
.section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
END(excep_ejtag)
LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* Check that the core implements the MT ASE */
has_mt t0, 3f
- nop
.set push
.set mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
PTR_ADDU t0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+ li t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
has_mt ta2, 1f
- li t9, 0
/* Find the number of VPEs present in the core */
mfc0 t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
/* Retrieve the VPE ID from EBase.CPUNum */
mfc0 t9, $15, 1
and t9, t9, t1
+#endif
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
PTR_ADDU v0, v0, ta3
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* If the core doesn't support MT then return */
bnez ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
2: .set pop
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
/* Return */
jr ra
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 008b33786..4ceac5cdd 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -338,7 +338,7 @@ static void __init bootmem_init(void)
if (end <= reserved_end)
continue;
#ifdef CONFIG_BLK_DEV_INITRD
- /* mapstart should be after initrd_end */
+ /* Skip zones before initrd and initrd itself */
if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
continue;
#endif
@@ -371,6 +371,14 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * mapstart should be after initrd_end
+ */
+ if (initrd_end)
+ mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
/*
* Initialize the boot-time allocator with low memory only.
*/
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index f6c44dd33..d6d07ad56 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
}
if (memsize == 0)
memsize = 256;
+
+ loongson_sysconf.nr_uarts = 1;
+
pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
#else
struct boot_params *boot_p;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index eeaf0245c..815892ed3 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index e92726099..dabf4179c 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -64,8 +64,20 @@ sk_load_word_positive:
PTR_ADDU t1, $r_skb_data, offset
lw $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
rotr $r_A, t0, 16
+# else
+ sll t0, $r_A, 24
+ srl t1, $r_A, 24
+ srl t2, $r_A, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_A, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ or $r_A, t0, t1
+# endif
#endif
jr $r_ra
move $r_ret, zero
@@ -80,8 +92,16 @@ sk_load_half_positive:
PTR_ADDU t1, $r_skb_data, offset
lh $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
seh $r_A, t0
+# else
+ sll t0, $r_A, 24
+ andi t1, $r_A, 0xff00
+ sra t0, t0, 16
+ srl t1, t1, 8
+ or $r_A, t0, t1
+# endif
#endif
jr $r_ra
move $r_ret, zero
@@ -148,23 +168,47 @@ sk_load_byte_positive:
NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_s0
jr $r_ra
rotr $r_A, t0, 16
-#endif
+# else
+ sll t0, $r_s0, 24
+ srl t1, $r_s0, 24
+ srl t2, $r_s0, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_s0, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
jr $r_ra
- move $r_A, $r_s0
+ move $r_A, $r_s0
+#endif
END(bpf_slow_path_word)
NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
jr $r_ra
wsbh $r_A, $r_s0
-#endif
+# else
+ sll t0, $r_s0, 8
+ andi t1, $r_s0, 0xff00
+ andi t0, t0, 0xff00
+ srl t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
jr $r_ra
move $r_A, $r_s0
+#endif
END(bpf_slow_path_half)
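The fallback sequences added above for cores without the MIPS R2 wsbh/rotr/seh instructions are open-coded byte swaps. What the word-sized variant computes, expressed as a C sketch for reference:
/* sketch: byte-reverse a 32-bit load, equivalent to wsbh + rotr 16 */
static inline unsigned int bswap32_sketch(unsigned int x)
{
	return  (x << 24)                |	/* byte 0 -> byte 3 */
		((x >> 24) & 0x000000ff) |	/* byte 3 -> byte 0 */
		((x >>  8) & 0x0000ff00) |	/* byte 2 -> byte 1 */
		((x & 0x0000ff00) << 8);	/* byte 1 -> byte 2 */
}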
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 05ea8fc7f..4816fe2fa 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -827,12 +827,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
unsigned long size = kvmppc_get_gpr(vcpu, 4);
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
u64 buf;
+ int srcu_idx;
int ret;
if (!is_power_of_2(size) || (size > sizeof(buf)))
return H_TOO_HARD;
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;
@@ -867,6 +870,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
unsigned long val = kvmppc_get_gpr(vcpu, 6);
u64 buf;
+ int srcu_idx;
int ret;
switch (size) {
@@ -890,7 +894,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
return H_TOO_HARD;
}
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 68d067ad4..a9f753fb7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vc->runner = vcpu;
if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
- } else if (should_resched()) {
+ } else if (need_resched()) {
vc->vcore_state = VCORE_PREEMPT;
/* Let something else run */
cond_resched_lock(&vc->lock);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 76408cf0a..437f64350 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1171,6 +1171,7 @@ mc_cont:
bl kvmhv_accumulate_time
#endif
+ mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index 27f2b187a..ff1bb4b69 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -63,6 +63,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
@@ -70,10 +71,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}
return;
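The pasemi change above, and the powernv, fsl_msi, mpic_u3msi and ppc4xx changes that follow, all apply the same reordering: read the hardware IRQ number before irq_dispose_mapping() tears the virq mapping down, and free the MSI bitmap entry last. A condensed sketch of the shared pattern (the bitmap field name varies per driver):
list_for_each_entry(entry, &pdev->msi_list, list) {
	if (entry->irq == NO_IRQ)
		continue;
	hwirq = virq_to_hw(entry->irq);		/* must be read while the mapping exists */
	irq_set_msi_desc(entry->irq, NULL);
	irq_dispose_mapping(entry->irq);	/* the virq is gone after this */
	msi_bitmap_free_hwirqs(&bitmap, hwirq, 1);
}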
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 765d8ed55..fd16f86e5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
if (WARN_ON(!phb))
return;
@@ -106,10 +107,10 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 5236e5427..691e8e517 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index fc46ef3b8..4c3165fa5 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}
return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 6eb21f2ea..060f23775 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;
dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d4788111c..fac6ac979 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index fe8d6924e..c78ba51ae 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;
+static inline void sigset_to_sigset32(unsigned long *set64,
+ compat_sigset_word *set32)
+{
+ set32[0] = (compat_sigset_word) set64[0];
+ set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+ unsigned long *set64)
+{
+ set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -303,10 +316,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
@@ -323,10 +338,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -397,7 +414,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
/* Create struct sigcontext32 on the signal stack */
- memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+ sigset_to_sigset32(set->sig, sc.oldmask);
sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
@@ -458,6 +475,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
+ compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -505,11 +523,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();
/* Create ucontext on the signal stack. */
+ sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
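The sigset_to_sigset32()/sigset32_to_sigset() helpers added above replace raw copies of the signal mask with an explicit split of the 64-bit mask into the two 32-bit words a compat frame stores, which keeps the word order correct on big-endian s390. A small worked sketch with an arbitrary example mask:
unsigned long set64 = 0x0000000100004000UL;	/* arbitrary example mask */
compat_sigset_word set32[2];
set32[0] = (compat_sigset_word) set64;		/* low word:  0x00004000 */
set32[1] = (compat_sigset_word)(set64 >> 32);	/* high word: 0x00000001 */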
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8cb3e438f..d330840a2 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1219,7 +1219,18 @@ END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1270,9 +1281,12 @@ ENTRY(nmi)
* we don't want to enable interrupts, because then we'll end
* up in an awkward situation in which IRQs are on but NMIs
* are off.
+ *
+ * We also must not push anything to the stack before switching
+ * stacks lest we corrupt the "NMI executing" variable.
*/
- SWAPGS
+ SWAPGS_UNSAFE_STACK
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9ebc3d009..2350ab781 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -311,6 +311,7 @@
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
+#define MSR_K8_TSEG_MASK 0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index dca71714f..b12f81022 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!raw_cpu_read_4(__preempt_count));
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
#ifdef CONFIG_PREEMPT
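For context on the should_resched() signature change above: callers now pass the preempt count they expect to hold, so a path such as cond_resched_lock() can ask for a reschedule while legitimately holding one level of preempt disable. The single x86 compare works because the need-resched flag is folded into the per-cpu preempt count; the intent is roughly this generic sketch:
/* sketch of the equivalent generic check */
static inline bool should_resched_sketch(int preempt_offset)
{
	/* true only when a reschedule is pending and the preempt count
	 * is exactly what the caller claims to be holding */
	return preempt_count() == preempt_offset && tif_need_resched();
}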
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae1c..eaba08076 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
}
#endif
-#define virt_queued_spin_lock virt_queued_spin_lock
-
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
{
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
- while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
- cpu_relax();
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+ do {
+ while (atomic_read(&lock->val) != 0)
+ cpu_relax();
+ } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
return true;
}
+#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
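The virt_spin_lock() fallback above also switches from a plain test-and-set loop to test-and-test-and-set: spin on ordinary reads and only issue the atomic cmpxchg when the lock appears free, so waiters stop bouncing the cache line with failed atomics. Side by side, as a sketch:
/* before: every spin iteration is an atomic read-modify-write */
while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
	cpu_relax();
/* after: read-only spin, atomic attempt only when the lock looks free */
do {
	while (atomic_read(&lock->val) != 0)
		cpu_relax();
} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);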
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c42827eb8..25f909362 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -338,10 +338,15 @@ done:
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
+ unsigned long flags;
+
if (instr[0] != 0x90)
return;
+ local_irq_save(flags);
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ sync_core();
+ local_irq_restore(flags);
DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index cde732c1b..307a49828 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
apic_write(APIC_LVTT, lvtt_value);
if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 206052e55..5880b482d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2522,6 +2522,7 @@ void __init setup_ioapic_dest(void)
int pin, ioapic, irq, irq_entry;
const struct cpumask *mask;
struct irq_data *idata;
+ struct irq_chip *chip;
if (skip_ioapic_setup == 1)
return;
@@ -2545,9 +2546,9 @@ void __init setup_ioapic_dest(void)
else
mask = apic->target_cpus();
- irq_set_affinity(irq, mask);
+ chip = irq_data_get_irq_chip(idata);
+ chip->irq_set_affinity(idata, mask, false);
}
-
}
#endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6326ae24e..1b09c420c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2102,9 +2102,12 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = cpuc->event_constraint[idx];
+ struct event_constraint *c1 = NULL;
struct event_constraint *c2;
+ if (idx >= 0) /* fake does < 0 */
+ c1 = cpuc->event_constraint[idx];
+
/*
* first time only
* - static constraint: no change across incremental scheduling calls
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e068d6683..74ca2fe7a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
}
#ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
- unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
- int *nr_ranges = arg;
+ unsigned int *nr_ranges = arg;
(*nr_ranges)++;
return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->image = image;
- walk_system_ram_range(0, -1, &nr_ranges,
+ walk_system_ram_res(0, -1, &nr_ranges,
get_nr_ram_ranges_callback);
ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 58bcfb67c..ebb5657ee 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
#include <asm/timer.h>
#include <asm/special_insns.h>
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");
/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f6b916387..a90ac9556 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -497,27 +497,59 @@ void set_personality_ia32(bool x32)
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long stack;
- u64 fp, ip;
+ unsigned long start, bottom, top, sp, fp, ip;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack = (unsigned long)task_stack_page(p);
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+
+ start = (unsigned long)task_stack_page(p);
+ if (!start)
+ return 0;
+
+ /*
+ * Layout of the stack page:
+ *
+ * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+ * ----------- bottom = start + sizeof(thread_info)
+ * thread_info
+ * ----------- start
+ *
+ * The tasks stack pointer points at the location where the
+ * framepointer is stored. The data on the stack is:
+ * ... IP FP ... IP FP
+ *
+ * We need to read FP and IP, so we need to adjust the upper
+ * bound by another unsigned long.
+ */
+ top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+ top -= 2 * sizeof(unsigned long);
+ bottom = start + sizeof(struct thread_info);
+
+ sp = READ_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
return 0;
- fp = *(u64 *)(p->thread.sp);
+
+ fp = READ_ONCE(*(unsigned long *)sp);
do {
- if (fp < (unsigned long)stack ||
- fp >= (unsigned long)stack+THREAD_SIZE)
+ if (fp < bottom || fp > top)
return 0;
- ip = *(u64 *)(fp+8);
+ ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
if (!in_sched_functions(ip))
return ip;
- fp = *(u64 *)fp;
- } while (count++ < 16);
+ fp = READ_ONCE(*(unsigned long *)fp);
+ } while (count++ < 16 && p->state != TASK_RUNNING);
return 0;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1b513e093..0e8151b4b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -22,6 +22,7 @@
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -1018,15 +1019,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;
- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8e0c0844c..2d32b67a1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -513,7 +513,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->vmcb->control.next_rip != 0) {
- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}
@@ -865,64 +865,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
-#define MTRR_TYPE_UC_MINUS 7
-#define MTRR2PROTVAL_INVALID 0xff
-
-static u8 mtrr2protval[8];
-
-static u8 fallback_mtrr_type(int mtrr)
-{
- /*
- * WT and WP aren't always available in the host PAT. Treat
- * them as UC and UC- respectively. Everything else should be
- * there.
- */
- switch (mtrr)
- {
- case MTRR_TYPE_WRTHROUGH:
- return MTRR_TYPE_UNCACHABLE;
- case MTRR_TYPE_WRPROT:
- return MTRR_TYPE_UC_MINUS;
- default:
- BUG();
- }
-}
-
-static void build_mtrr2protval(void)
-{
- int i;
- u64 pat;
-
- for (i = 0; i < 8; i++)
- mtrr2protval[i] = MTRR2PROTVAL_INVALID;
-
- /* Ignore the invalid MTRR types. */
- mtrr2protval[2] = 0;
- mtrr2protval[3] = 0;
-
- /*
- * Use host PAT value to figure out the mapping from guest MTRR
- * values to nested page table PAT/PCD/PWT values. We do not
- * want to change the host PAT value every time we enter the
- * guest.
- */
- rdmsrl(MSR_IA32_CR_PAT, pat);
- for (i = 0; i < 8; i++) {
- u8 mtrr = pat >> (8 * i);
-
- if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
- mtrr2protval[mtrr] = __cm_idx2pte(i);
- }
-
- for (i = 0; i < 8; i++) {
- if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
- u8 fallback = fallback_mtrr_type(i);
- mtrr2protval[i] = mtrr2protval[fallback];
- BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
- }
- }
-}
-
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -989,7 +931,6 @@ static __init int svm_hardware_setup(void)
} else
kvm_disable_tdp();
- build_mtrr2protval();
return 0;
err:
@@ -1144,39 +1085,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - tsc;
}
-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
-{
- struct kvm_vcpu *vcpu = &svm->vcpu;
-
- /* Unlike Intel, AMD takes the guest's CR0.CD into account.
- *
- * AMD does not have IPAT. To emulate it for the case of guests
- * with no assigned devices, just set everything to WB. If guests
- * have assigned devices, however, we cannot force WB for RAM
- * pages only, so use the guest PAT directly.
- */
- if (!kvm_arch_has_assigned_device(vcpu->kvm))
- *g_pat = 0x0606060606060606;
- else
- *g_pat = vcpu->arch.pat;
-}
-
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
- u8 mtrr;
-
- /*
- * 1. MMIO: trust guest MTRR, so same as item 3.
- * 2. No passthrough: always map as WB, and force guest PAT to WB as well
- * 3. Passthrough: can't guarantee the result, try to trust guest.
- */
- if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
- return 0;
-
- mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
- return mtrr2protval[mtrr];
-}
-
static void init_vmcb(struct vcpu_svm *svm, bool init_event)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1260,6 +1168,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
* It also updates the guest-visible cr0 value.
*/
(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+ kvm_mmu_reset_context(&svm->vcpu);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
@@ -1272,7 +1181,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
- svm_set_guest_pat(svm, &save->g_pat);
save->cr3 = 0;
save->cr4 = 0;
}
@@ -3347,16 +3255,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
- case MSR_IA32_CR_PAT:
- if (npt_enabled) {
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
- return 1;
- vcpu->arch.pat = data;
- svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
- mark_dirty(svm->vmcb, VMCB_NPT);
- break;
- }
- /* fall through */
default:
return kvm_set_msr_common(vcpu, msr);
}
@@ -4191,6 +4089,11 @@ static bool svm_has_high_real_mode_segbase(void)
return true;
}
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+ return 0;
+}
+
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 83b7b5cd7..aa9e82295 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6134,6 +6134,8 @@ static __init int hardware_setup(void)
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
@@ -8632,17 +8634,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
u64 ipat = 0;
/* For VT-d and EPT combination
- * 1. MMIO: guest may want to apply WC, trust it.
+ * 1. MMIO: always map as UC
* 2. EPT with VT-d:
* a. VT-d without snooping control feature: can't guarantee the
- * result, try to trust guest. So the same as item 1.
+ * result, try to trust guest.
* b. VT-d with snooping control feature: snooping control feature of
* VT-d engine can guarantee the cache correctness. Just set it
* to WB to keep consistent with host. So the same as item 3.
* 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
* consistent with host MTRR
*/
- if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+ if (is_mmio) {
+ cache = MTRR_TYPE_UNCACHABLE;
+ goto exit;
+ }
+
+ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_WRBACK;
goto exit;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f0f6eca6..32c6e6ac5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2388,6 +2388,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_K8_SYSCFG:
+ case MSR_K8_TSEG_ADDR:
+ case MSR_K8_TSEG_MASK:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K8_INT_PENDING_MSG:
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3fba623e3..f9977a7a9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem().
*/
all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
rodata_test();
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 27062303c..7553921c1 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -35,6 +35,9 @@
#define PCIE_CAP_OFFSET 0x100
+/* Quirks for the listed devices */
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+
/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
#define PCI_FIXED_BAR_0_SIZE 0x04
@@ -214,10 +217,27 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
if (dev->irq_managed && dev->irq > 0)
return 0;
- if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ switch (intel_mid_identify_cpu()) {
+ case INTEL_MID_CPU_CHIP_TANGIER:
polarity = 0; /* active high */
- else
+
+ /* Special treatment for IRQ0 */
+ if (dev->irq == 0) {
+ /*
+ * TNG has IRQ0 assigned to eMMC controller. But there
+ * are also other devices with bogus PCI configuration
+ * that have IRQ0 assigned. This check ensures that
+ * eMMC gets it.
+ */
+ if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
+ return -EBUSY;
+ }
+ break;
+ default:
polarity = 1; /* active low */
+ break;
+ }
+
ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);
/*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index e4308fe6a..c6835bfad 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -705,6 +705,70 @@ out:
}
/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+ /* Initial call */
+ if (!entry)
+ return memmap.map_end - memmap.desc_size;
+
+ entry -= memmap.desc_size;
+ if (entry < memmap.map)
+ return NULL;
+
+ return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+ /*
+ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+ * config table feature requires us to map all entries
+ * in the same order as they appear in the EFI memory
+ * map. That is to say, entry N must have a lower
+ * virtual address than entry N+1. This is because the
+ * firmware toolchain leaves relative references in
+ * the code/data sections, which are split and become
+ * separate EFI memory regions. Mapping things
+ * out-of-order leads to the firmware accessing
+ * unmapped addresses.
+ *
+ * Since we need to map things this way whether or not
+ * the kernel actually makes use of
+ * EFI_PROPERTIES_TABLE, let's just switch to this
+ * scheme by default for 64-bit.
+ */
+ return efi_map_next_entry_reverse(entry);
+ }
+
+ /* Initial call */
+ if (!entry)
+ return memmap.map;
+
+ entry += memmap.desc_size;
+ if (entry >= memmap.map_end)
+ return NULL;
+
+ return entry;
+}
+
+/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
*/
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
unsigned long left = 0;
efi_memory_desc_t *md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 11d6fb4e8..777ad2f03 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
#include <linux/memblock.h>
#include <linux/edd.h>
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
@@ -1800,6 +1804,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
.notifier_call = xen_hvm_cpu_notify,
};
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+ native_machine_shutdown();
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_machine_crash_shutdown(regs);
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -1819,6 +1838,10 @@ static void __init xen_hvm_guest_init(void)
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
}
#endif