Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/include/asm/pgtable.h              2
-rw-r--r--  arch/arc/mm/dma.c                           3
-rw-r--r--  arch/arm/boot/dts/arm-realview-pbx-a9.dts   9
-rw-r--r--  arch/arm/boot/dts/sun4i-a10-a1000.dts       1
-rw-r--r--  arch/arm/boot/dts/sun4i-a10-hackberry.dts   1
-rw-r--r--  arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts  1
-rw-r--r--  arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts    1
-rw-r--r--  arch/arm/boot/dts/tegra124-jetson-tk1.dts   4
-rw-r--r--  arch/arm/configs/aspeed_g4_defconfig        2
-rw-r--r--  arch/arm/configs/aspeed_g5_defconfig        2
-rw-r--r--  arch/arm/mm/dma-mapping.c                   62
-rw-r--r--  arch/arm64/Kconfig                          2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi    2
-rw-r--r--  arch/arm64/kernel/debug-monitors.c          1
-rw-r--r--  arch/arm64/kernel/hibernate.c               82
-rw-r--r--  arch/arm64/kernel/smp.c                     10
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S             4
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c              8
-rw-r--r--  arch/arm64/kvm/inject_fault.c               12
-rw-r--r--  arch/arm64/mm/mmu.c                         8
-rw-r--r--  arch/arm64/mm/proc.S                        2
-rw-r--r--  arch/metag/include/asm/cmpxchg_lnkget.h     2
-rw-r--r--  arch/mips/kernel/cevt-r4k.c                 7
-rw-r--r--  arch/mips/kernel/csrc-r4k.c                 4
-rw-r--r--  arch/mips/kvm/emulate.c                     40
-rw-r--r--  arch/mips/kvm/tlb.c                         66
-rw-r--r--  arch/mips/loongson64/loongson-3/hpet.c      14
-rw-r--r--  arch/mips/mm/uasm-mips.c                    2
-rw-r--r--  arch/powerpc/kernel/tm.S                    3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S     462
-rw-r--r--  arch/s390/include/asm/pgtable.h             2
-rw-r--r--  arch/s390/include/asm/tlbflush.h            3
-rw-r--r--  arch/s390/mm/gmap.c                         4
-rw-r--r--  arch/s390/mm/pgtable.c                      2
-rw-r--r--  arch/um/os-Linux/signal.c                   5
-rw-r--r--  arch/unicore32/include/asm/mmu_context.h    2
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c        10
-rw-r--r--  arch/x86/kvm/mtrr.c                         1
-rw-r--r--  arch/x86/kvm/vmx.c                          20
-rw-r--r--  arch/x86/kvm/x86.c                          45
-rw-r--r--  arch/x86/pci/intel_mid_pci.c                12
41 files changed, 541 insertions(+), 384 deletions(-)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 858f98ef7..0f92d9743 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -110,7 +110,7 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
/* More abbreviated helpers */
#define PAGE_U_NONE __pgprot(___DEF)
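Note on the hunk above: _PAGE_CHG_MASK is the set of bits that pte_modify() preserves across a protection change, so leaving _PAGE_SPECIAL out of it meant the special bit was silently dropped. A minimal sketch of the pattern, using the hunk's identifiers (the exact ARC definition may differ):

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
                     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}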
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 73d7e4c75..ab74b5d91 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- struct page *page = virt_to_page(dma_handle);
+ phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+ struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
index db808f92d..90d00b407 100644
--- a/arch/arm/boot/dts/arm-realview-pbx-a9.dts
+++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
@@ -70,13 +70,12 @@
* associativity as these may be erroneously set
* up by boot loader(s).
*/
- cache-size = <1048576>; // 1MB
- cache-sets = <4096>;
+ cache-size = <131072>; // 128KB
+ cache-sets = <512>;
cache-line-size = <32>;
arm,parity-disable;
- arm,tag-latency = <1>;
- arm,data-latency = <1 1>;
- arm,dirty-latency = <1>;
+ arm,tag-latency = <1 1 1>;
+ arm,data-latency = <1 1 1>;
};
scu: scu@1f000000 {
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index c92a1ae33..fa70b8fbf 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -84,6 +84,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
index 2b17c5199..6de83a618 100644
--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
@@ -66,6 +66,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
index 7afc7a64e..e28f080b1 100644
--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
@@ -80,6 +80,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
index 9fea918f9..39731a78f 100644
--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
@@ -79,6 +79,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 941f36263..f4d8125c1 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1386,7 +1386,7 @@
* Pin 41: BR_UART1_TXD
* Pin 44: BR_UART1_RXD
*/
- serial@70006000 {
+ serial@0,70006000 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
@@ -1398,7 +1398,7 @@
* Pin 71: UART2_CTS_L
* Pin 74: UART2_RTS_L
*/
- serial@70006040 {
+ serial@0,70006040 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index b6e54ee9b..ca39c04fe 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_PAGE_POISONING=y
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 892605167..4f366b037 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_PAGE_POISONING=y
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ff7ed5697..d2485c749 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
pgprot_t prot;
const void *caller;
bool want_vaddr;
+ int coherent_flag;
};
struct arm_dma_free_args {
@@ -59,6 +60,9 @@ struct arm_dma_free_args {
bool want_vaddr;
};
+#define NORMAL 0
+#define COHERENT 1
+
struct arm_dma_allocator {
void *(*alloc)(struct arm_dma_alloc_args *args,
struct page **ret_page);
@@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
return mask;
}
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
@@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
while (size > 0) {
void *ptr = kmap_atomic(page);
memset(ptr, 0, PAGE_SIZE);
- dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ if (coherent_flag != COHERENT)
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
}
- outer_flush_range(base, end);
+ if (coherent_flag != COHERENT)
+ outer_flush_range(base, end);
} else {
void *ptr = page_address(page);
memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ if (coherent_flag != COHERENT) {
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
}
}
@@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
*/
-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, int coherent_flag)
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
@@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
return page;
}
@@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller, bool want_vaddr);
+ const void *caller, bool want_vaddr,
+ int coherent_flag);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
@@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!atomic_pool)
goto out;
-
+ /*
+ * The atomic pool is only used for non-coherent allocations
+ * so we must pass NORMAL for coherent_flag.
+ */
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init, true);
+ &page, atomic_pool_init, true, NORMAL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
@@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
{
struct page *page;
void *ptr = NULL;
- page = __dma_alloc_buffer(dev, size, gfp);
+ /*
+ * __alloc_remap_buffer is only called when the device is
+ * non-coherent
+ */
+ page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
if (!page)
return NULL;
if (!want_vaddr)
@@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller, bool want_vaddr)
+ const void *caller, bool want_vaddr,
+ int coherent_flag)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
@@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
if (!page)
return NULL;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
if (!want_vaddr)
goto out;
@@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
#define __free_from_pool(cpu_addr, size) do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
@@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
struct page **ret_page)
{
struct page *page;
- page = __dma_alloc_buffer(dev, size, gfp);
+ /* __alloc_simple_buffer is only called when the device is coherent */
+ page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
if (!page)
return NULL;
@@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
- args->want_vaddr);
+ args->want_vaddr, args->coherent_flag);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
.prot = prot,
.caller = caller,
.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .coherent_flag = is_coherent ? COHERENT : NORMAL,
};
#ifdef CONFIG_DMA_API_DEBUG
@@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
static const int iommu_order_array[] = { 9, 8, 4, 0 };
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, struct dma_attrs *attrs,
+ int coherent_flag)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
@@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
if (!page)
goto error;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
for (i = 0; i < count; i++)
pages[i] = page + i;
@@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
pages[i + j] = pages[i] + j;
}
- __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
i += 1 << order;
count -= 1 << order;
}
@@ -1505,7 +1526,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*/
gfp &= ~(__GFP_COMP);
- pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ /* For now always consider we are in a non-coherent case */
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
if (!pages)
return NULL;
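Taken together, the dma-mapping.c hunks thread a coherent_flag from the allocation entry points down to __dma_clear_buffer(), so that zeroing a buffer destined for a cache-coherent device skips the now-unnecessary cache maintenance. A condensed sketch of the decision, reusing the hunk's identifiers (not a compilable kernel excerpt):

static void clear_buffer(void *ptr, size_t size, int coherent_flag)
{
        memset(ptr, 0, size);                   /* zeroing is always needed */
        if (coherent_flag != COHERENT) {
                dmac_flush_range(ptr, ptr + size);               /* inner cache */
                outer_flush_range(__pa(ptr), __pa(ptr) + size);  /* outer cache */
        }
}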
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5a0a691d4..20384925b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -872,7 +872,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS
+ select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
help
Randomizes the virtual address at which the kernel image is
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 8b4a7c915..080203e3a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -670,7 +670,7 @@
#address-cells = <0>;
reg = <0x0 0xffb71000 0x0 0x1000>,
- <0x0 0xffb72000 0x0 0x1000>,
+ <0x0 0xffb72000 0x0 0x2000>,
<0x0 0xffb74000 0x0 0x2000>,
<0x0 0xffb76000 0x0 0x2000>;
interrupts = <GIC_PPI 9
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 4fbf3c542..0800d23e2 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -151,7 +151,6 @@ static int debug_monitors_init(void)
/* Clear the OS lock. */
on_each_cpu(clear_os_lock, NULL, 1);
isb();
- local_dbg_enable();
/* Register hotplug handler. */
__register_cpu_notifier(&os_lock_nb);
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9f..65d81f965 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
+#include <asm/sysreg.h>
#include <asm/virt.h>
/*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
set_pte(pte, __pte(virt_to_phys((void *)dst) |
pgprot_val(PAGE_KERNEL_EXEC)));
- /* Load our new page tables */
- asm volatile("msr ttbr0_el1, %0;"
- "isb;"
- "tlbi vmalle1is;"
- "dsb ish;"
- "isb" : : "r"(virt_to_phys(pgd)));
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+ * ensure that TLBs are free of any entries that may overlap with the
+ * global mappings we are about to install.
+ *
+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+ * runtime services), while for a userspace-driven test_resume cycle it
+ * points to userspace page tables (and we must point it at a zero page
+ * ourselves). Elsewhere we only (un)install the idmap with preemption
+ * disabled, so T0SZ should be as required regardless.
+ */
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ isb();
*phys_dst_addr = virt_to_phys((void *)dst);
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
void *, phys_addr_t, phys_addr_t);
/*
+ * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when
+ * restoring.
+ */
+ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!tmp_pg_dir) {
+ pr_err("Failed to allocate memory for temporary page tables.");
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ if (rc)
+ goto out;
+
+ /*
+ * Since we only copied the linear map, we need to find restore_pblist's
+ * linear map address.
+ */
+ lm_restore_pblist = LMADDR(restore_pblist);
+
+ /*
+ * We need a zero page that is zero before & after resume in order
+ * to break before make on the ttbr1 page tables.
+ */
+ zero_page = (void *)get_safe_page(GFP_ATOMIC);
+ if (!zero_page) {
+ pr_err("Failed to allocate zero page.");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
* Locate the exit code in the bottom-but-one page, so that *NULL
* still has disastrous effects.
*/
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
__flush_dcache_area(hibernate_exit, exit_size);
/*
- * Restoring the memory image will overwrite the ttbr1 page tables.
- * Create a second copy of just the linear map, and use this when
- * restoring.
- */
- tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!tmp_pg_dir) {
- pr_err("Failed to allocate memory for temporary page tables.");
- rc = -ENOMEM;
- goto out;
- }
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
- if (rc)
- goto out;
-
- /*
- * Since we only copied the linear map, we need to find restore_pblist's
- * linear map address.
- */
- lm_restore_pblist = LMADDR(restore_pblist);
-
- /*
* KASLR will cause the el2 vectors to be in a different location in
* the resumed kernel. Load hibernate's temporary copy into el2.
*
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
__hyp_set_vectors(el2_vectors);
}
- /*
- * We need a zero page that is zero before & after resume in order to
- * to break before make on the ttbr1 page tables.
- */
- zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
resume_hdr.reenter_kernel, lm_restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
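The two hibernate.c hunks above are one move: every allocation that can fail (tmp_pg_dir, zero_page) now happens before the exit code is copied and the restore becomes irreversible, and zero_page gains the NULL check it previously lacked. The ordering principle, condensed with the hunks' identifiers:

tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!tmp_pg_dir)
        return -ENOMEM;         /* failing here is still recoverable */
zero_page = (void *)get_safe_page(GFP_ATOMIC);
if (!zero_page)
        return -ENOMEM;         /* this check was previously missing */
/* ...only then copy hibernate_exit and commit to the restore path */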
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 62ff3c062..490db85de 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -267,7 +267,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- local_dbg_enable();
local_irq_enable();
local_async_enable();
@@ -437,9 +436,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
save_boot_cpu_run_el();
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@ -694,6 +693,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(smp_processor_id());
/*
+ * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
+ * secondary CPUs present.
+ */
+ if (max_cpus == 0)
+ return;
+
+ /*
* Initialise the present map (which describes the set of CPUs
* actually populated at the present time) and release the
* secondaries from the bootloader.
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 435e820e8..e564d4595 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -181,9 +181,9 @@ SECTIONS
*(.hash)
}
- __rela_offset = ADDR(.rela) - KIMAGE_VADDR;
+ __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela);
- __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR;
+ __dynsym_offset = ABSOLUTE(ADDR(.dynsym) - KIMAGE_VADDR);
. = ALIGN(SEGMENT_ALIGN);
__init_end = .;
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 0f7c40eb3..934137647 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
/*
* Non-VHE: Both host and guest must save everything.
*
- * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
- * guest must save everything.
+ * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
+ * pstate, and guest must save everything.
*/
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@@ -37,6 +37,7 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
+ ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
@@ -61,7 +62,6 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
- ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
@@ -90,6 +90,7 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
@@ -114,7 +115,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
- write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index e9e0e6db7..898c0e6ae 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -132,16 +132,14 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_aarch32;
+ bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
u32 esr = 0;
- is_aarch32 = vcpu_mode_is_32bit(vcpu);
-
- *vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
-
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ *vcpu_spsr(vcpu) = cpsr;
vcpu_sys_reg(vcpu, FAR_EL1) = addr;
@@ -172,11 +170,11 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
- *vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
-
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ *vcpu_spsr(vcpu) = cpsr;
/*
* Build an unknown exception, depending on the instruction
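Besides folding the is_aarch32 initialization, the reorder in both injectors moves the SPSR write after the PSTATE update. The likely rationale (hedged, inferred from the hunks): for a 32-bit guest, vcpu_spsr() resolves to a banked register selected by the vcpu's current mode, so the new mode must be installed before the old PSTATE is saved into it:

unsigned long old_cpsr = *vcpu_cpsr(vcpu);

*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
*vcpu_pc(vcpu)      = get_except_vector(vcpu, except_type_sync);
*vcpu_cpsr(vcpu)    = PSTATE_FAULT_BITS_64;  /* switch mode first...    */
*vcpu_spsr(vcpu)    = old_cpsr;              /* ...then save old PSTATE */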
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0f85a46c3..3e90a2cad 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -748,9 +748,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
/*
* Check whether the physical FDT address is set and meets the minimum
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
- * at least 8 bytes so that we can always access the size field of the
- * FDT header after mapping the first chunk, double check here if that
- * is indeed the case.
+ * at least 8 bytes so that we can always access the magic and size
+ * fields of the FDT header after mapping the first chunk, double check
+ * here if that is indeed the case.
*/
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
@@ -778,7 +778,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
- if (fdt_check_header(dt_virt) != 0)
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
return NULL;
*size = fdt_totalsize(dt_virt);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c4317879b..5bb61de23 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -180,6 +180,8 @@ ENTRY(__cpu_setup)
msr cpacr_el1, x0 // Enable FP/ASIMD
mov x0, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x0 // access to the DCC from EL0
+ isb // Unmask debug exceptions now,
+ enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
/*
* Memory region attributes for LPAE:
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
index 0154e2807..2369ad394 100644
--- a/arch/metag/include/asm/cmpxchg_lnkget.h
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" DCACHE [%2], %0\n"
#endif
"2:\n"
- : "=&d" (temp), "=&da" (retval)
+ : "=&d" (temp), "=&d" (retval)
: "da" (m), "bd" (old), "da" (new)
: "cc"
);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index e4c21bbf9..804d2a2a1 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -276,12 +276,7 @@ int r4k_clockevent_init(void)
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
- clockevent_set_clock(cd, mips_hpt_frequency);
-
- /* Calculate the min / max delta */
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
min_delta = calculate_min_delta();
- cd->min_delta_ns = clockevent_delta2ns(min_delta, cd);
cd->rating = 300;
cd->irq = irq;
@@ -289,7 +284,7 @@ int r4k_clockevent_init(void)
cd->set_next_event = mips_next_event;
cd->event_handler = mips_event_handler;
- clockevents_register_device(cd);
+ clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
if (cp0_timer_irq_installed)
return 0;
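The cevt-r4k.c hunks collapse the hand-rolled clockevent configuration into one helper call. Roughly (sketch, same identifiers), the removed sequence

clockevent_set_clock(cd, mips_hpt_frequency);             /* mult/shift */
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(min_delta, cd);
clockevents_register_device(cd);

is subsumed by

clockevents_config_and_register(cd, mips_hpt_frequency,
                                min_delta, 0x7fffffff);

which computes mult/shift and both delta bounds internally before registering.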
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 1f910563f..d76275da5 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace r4k_read_sched_clock(void)
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
return read_c0_count();
}
@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+#ifndef CONFIG_CPU_FREQ
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
return 0;
}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 645c8a198..2b42a74ed 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1615,8 +1615,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+ __func__, va, vcpu, read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
+ }
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
int index;
@@ -1654,14 +1660,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
run, vcpu);
preempt_enable();
goto dont_update_pc;
- } else {
- /*
- * We fault an entry from the guest tlb to the
- * shadow host TLB
- */
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL,
- NULL);
+ }
+ /*
+ * We fault an entry from the guest tlb to the
+ * shadow host TLB
+ */
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
}
}
} else {
@@ -2625,8 +2636,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
- NULL);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ }
}
}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index ed021ae78..ad2270ff8 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -284,7 +284,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
}
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if (gfn >= kvm->arch.guest_pmap_npages) {
+ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
gfn, badvaddr);
kvm_mips_dump_host_tlbs();
@@ -373,26 +373,40 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
kvm_pfn_t pfn0, pfn1;
+ gfn_t gfn0, gfn1;
+ long tlb_lo[2];
int ret;
- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
- pfn0 = 0;
- pfn1 = 0;
- } else {
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT];
- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT];
+ tlb_lo[0] = tlb->tlb_lo0;
+ tlb_lo[1] = tlb->tlb_lo1;
+
+ /*
+ * The commpage address must not be mapped to anything else if the guest
+ * TLB contains entries nearby, or commpage accesses will break.
+ */
+ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+ VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+ if (gfn0 >= kvm->arch.guest_pmap_npages ||
+ gfn1 >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+ __func__, gfn0, gfn1, tlb->tlb_hi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ return -1;
}
+ if (kvm_mips_map_page(kvm, gfn0) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, gfn1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn0];
+ pfn1 = kvm->arch.guest_pmap[gfn1];
+
if (hpa0)
*hpa0 = pfn0 << PAGE_SHIFT;
@@ -401,9 +415,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
/* Get attributes from the Guest TLB */
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+ (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+ (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo0, tlb->tlb_lo1);
@@ -776,10 +790,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
local_irq_restore(flags);
return KVM_INVALID_INST;
}
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.
- guest_tlb[index],
- NULL, NULL);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.guest_tlb[index],
+ NULL, NULL)) {
+ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, index, vcpu,
+ read_c0_entryhi());
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
inst = *(opc);
}
local_irq_restore(flags);
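On the (gfn | 1) bounds check in the first tlb.c hunk: a MIPS TLB entry maps an even/odd pair of pages, and the kseg0 fault handler fills both halves, so the odd gfn of the pair must be in range too. Setting bit 0 yields that odd gfn, letting one comparison cover both pages. An illustrative userspace check (not kernel code):

#include <assert.h>

static int gfn_pair_in_range(unsigned long gfn, unsigned long npages)
{
        return (gfn | 1) < npages;      /* inverse of the hunk's test */
}

int main(void)
{
        assert( gfn_pair_in_range(6, 8));   /* pair {6,7}: both valid */
        assert(!gfn_pair_in_range(6, 7));   /* even ok, odd 7 is not  */
        assert(!gfn_pair_in_range(7, 7));   /* odd half out of range  */
        return 0;
}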
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 249039af6..4788bea62 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,8 +13,8 @@
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
-#define HPET_MIN_CYCLES 64
-#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+#define HPET_MIN_CYCLES 16
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
static DEFINE_SPINLOCK(hpet_lock);
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned int cnt;
- int res;
+ u32 cnt;
+ s32 res;
cnt = hpet_read(HPET_COUNTER);
- cnt += delta;
+ cnt += (u32) delta;
hpet_write(HPET_T0_CMP, cnt);
- res = (int)(cnt - hpet_read(HPET_COUNTER));
+ res = (s32)(cnt - hpet_read(HPET_COUNTER));
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
cd = &per_cpu(hpet_clockevent_device, cpu);
cd->name = "hpet";
- cd->rating = 320;
+ cd->rating = 100;
cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
cd->set_state_shutdown = hpet_set_state_shutdown;
cd->set_state_periodic = hpet_set_state_periodic;
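On the hpet_next_event() type changes: the HPET comparator is a 32-bit register, so the "is the programmed time still in the future?" test must wrap modulo 2^32 and then be read as signed; the fixed-width u32/s32 types make that intent explicit rather than relying on int being 32 bits. Illustrative userspace arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t counter = 0xfffffff0u;          /* count just before wrap */
        uint32_t cmp     = counter + 0x40u;      /* target wraps to 0x30   */
        uint32_t now     = 0x00000010u;          /* counter read back      */
        int32_t  res     = (int32_t)(cmp - now); /* 0x20: still ahead      */

        printf("res = %d (>= HPET_MIN_CYCLES means not missed)\n", res);
        return 0;
}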
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 9c2220a45..45e3b8799 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -65,7 +65,7 @@ static struct insn insn_table[] = {
#ifndef CONFIG_CPU_MIPSR6
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#else
- { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+ { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
#endif
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index b7019b559..298afcf3b 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -338,8 +338,6 @@ _GLOBAL(__tm_recheckpoint)
*/
subi r7, r7, STACK_FRAME_OVERHEAD
- SET_SCRATCH0(r1)
-
mfmsr r6
/* R4 = original MSR to indicate whether thread used FP/Vector etc. */
@@ -468,6 +466,7 @@ restore_gprs:
* until we turn MSR RI back on.
*/
+ SET_SCRATCH0(r1)
ld r5, -8(r1)
ld r1, -16(r1)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e571ad277..38e108eae 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
- b skip_tm
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
-
- /* Turn on TM/FP/VSX/VMX so we can restore them. */
- mfmsr r5
- li r6, MSR_TM >> 32
- sldi r6, r6, 32
- or r5, r5, r6
- ori r5, r5, MSR_FP
- oris r5, r5, (MSR_VEC | MSR_VSX)@h
- mtmsrd r5
-
- /*
- * The user may change these outside of a transaction, so they must
- * always be context switched.
- */
- ld r5, VCPU_TFHAR(r4)
- ld r6, VCPU_TFIAR(r4)
- ld r7, VCPU_TEXASR(r4)
- mtspr SPRN_TFHAR, r5
- mtspr SPRN_TFIAR, r6
- mtspr SPRN_TEXASR, r7
-
- ld r5, VCPU_MSR(r4)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq skip_tm /* TM not active in guest */
-
- /* Make sure the failure summary is set, otherwise we'll program check
- * when we trechkpt. It's possible that this might have been not set
- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
- * host.
- */
- oris r7, r7, (TEXASR_FS)@h
- mtspr SPRN_TEXASR, r7
-
- /*
- * We need to load up the checkpointed state for the guest.
- * We need to do this early as it will blow away any GPRs, VSRs and
- * some SPRs.
- */
-
- mr r31, r4
- addi r3, r31, VCPU_FPRS_TM
- bl load_fp_state
- addi r3, r31, VCPU_VRS_TM
- bl load_vr_state
- mr r4, r31
- lwz r7, VCPU_VRSAVE_TM(r4)
- mtspr SPRN_VRSAVE, r7
-
- ld r5, VCPU_LR_TM(r4)
- lwz r6, VCPU_CR_TM(r4)
- ld r7, VCPU_CTR_TM(r4)
- ld r8, VCPU_AMR_TM(r4)
- ld r9, VCPU_TAR_TM(r4)
- mtlr r5
- mtcr r6
- mtctr r7
- mtspr SPRN_AMR, r8
- mtspr SPRN_TAR, r9
-
- /*
- * Load up PPR and DSCR values but don't put them in the actual SPRs
- * till the last moment to avoid running with userspace PPR and DSCR for
- * too long.
- */
- ld r29, VCPU_DSCR_TM(r4)
- ld r30, VCPU_PPR_TM(r4)
-
- std r2, PACATMSCRATCH(r13) /* Save TOC */
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* Load GPRs r0-r28 */
- reg = 0
- .rept 29
- ld reg, VCPU_GPRS_TM(reg)(r31)
- reg = reg + 1
- .endr
-
- mtspr SPRN_DSCR, r29
- mtspr SPRN_PPR, r30
-
- /* Load final GPRs */
- ld 29, VCPU_GPRS_TM(29)(r31)
- ld 30, VCPU_GPRS_TM(30)(r31)
- ld 31, VCPU_GPRS_TM(31)(r31)
-
- /* TM checkpointed state is now setup. All GPRs are now volatile. */
- TRECHKPT
-
- /* Now let's get back the state we need. */
- HMT_MEDIUM
- GET_PACA(r13)
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATMSCRATCH(r13)
-
- /* Set the MSR RI since we have our registers back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-skip_tm:
+ bl kvmppc_restore_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
/* Load guest PMU registers */
@@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
/* Skip next section on POWER7 */
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
/* Load up POWER8-specific registers */
ld r5, VCPU_IAMR(r4)
lwz r6, VCPU_PSPB(r4)
@@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
- b 2f
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
- /* Turn on TM. */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
- ld r5, VCPU_MSR(r9)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq 1f /* TM not active in guest. */
-
- li r3, TM_CAUSE_KVM_RESCHED
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* All GPRs are volatile at this point. */
- TRECLAIM(R3)
-
- /* Temporarily store r13 and r9 so we have some regs to play with */
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- std r9, PACATMSCRATCH(r13)
- ld r9, HSTATE_KVM_VCPU(r13)
-
- /* Get a few more GPRs free. */
- std r29, VCPU_GPRS_TM(29)(r9)
- std r30, VCPU_GPRS_TM(30)(r9)
- std r31, VCPU_GPRS_TM(31)(r9)
-
- /* Save away PPR and DSCR soon so don't run with user values. */
- mfspr r31, SPRN_PPR
- HMT_MEDIUM
- mfspr r30, SPRN_DSCR
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-
- /* Save all but r9, r13 & r29-r31 */
- reg = 0
- .rept 29
- .if (reg != 9) && (reg != 13)
- std reg, VCPU_GPRS_TM(reg)(r9)
- .endif
- reg = reg + 1
- .endr
- /* ... now save r13 */
- GET_SCRATCH0(r4)
- std r4, VCPU_GPRS_TM(13)(r9)
- /* ... and save r9 */
- ld r4, PACATMSCRATCH(r13)
- std r4, VCPU_GPRS_TM(9)(r9)
-
- /* Reload stack pointer and TOC. */
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
-
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-
- /* Save away checkpinted SPRs. */
- std r31, VCPU_PPR_TM(r9)
- std r30, VCPU_DSCR_TM(r9)
- mflr r5
- mfcr r6
- mfctr r7
- mfspr r8, SPRN_AMR
- mfspr r10, SPRN_TAR
- std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
- std r7, VCPU_CTR_TM(r9)
- std r8, VCPU_AMR_TM(r9)
- std r10, VCPU_TAR_TM(r9)
-
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-
- /* Save FP/VSX. */
- addi r3, r9, VCPU_FPRS_TM
- bl store_fp_state
- addi r3, r9, VCPU_VRS_TM
- bl store_vr_state
- mfspr r6, SPRN_VRSAVE
- stw r6, VCPU_VRSAVE_TM(r9)
-1:
- /*
- * We need to save these SPRs after the treclaim so that the software
- * error code is recorded correctly in the TEXASR. Also the user may
- * change these outside of a transaction, so they must always be
- * context switched.
- */
- mfspr r5, SPRN_TFHAR
- mfspr r6, SPRN_TFIAR
- mfspr r7, SPRN_TEXASR
- std r5, VCPU_TFHAR(r9)
- std r6, VCPU_TFIAR(r9)
- std r7, VCPU_TEXASR(r9)
-2:
+ bl kvmppc_save_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
/* Increment yield count if they have a VPA */
@@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
/* save FP state */
bl kvmppc_save_fp
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ ld r9, HSTATE_KVM_VCPU(r13)
+ bl kvmppc_save_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+
/*
* Set DEC to the smaller of DEC and HDEC, so that we wake
* no later than the end of our timeslice (HDEC interrupts
@@ -2321,6 +2120,12 @@ kvm_end_cede:
bl kvmhv_accumulate_time
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ bl kvmppc_restore_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+
/* load up FP state */
bl kvmppc_load_fp
@@ -2631,6 +2436,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mr r4,r31
blr
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Save transactional state and TM-related registers.
+ * Called with r9 pointing to the vcpu struct.
+ * This can modify all checkpointed registers, but
+ * restores r1, r2 and r9 (vcpu pointer) before exit.
+ */
+kvmppc_save_tm:
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+
+ /* Turn on TM. */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ ld r5, VCPU_MSR(r9)
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beq 1f /* TM not active in guest. */
+
+ std r1, HSTATE_HOST_R1(r13)
+ li r3, TM_CAUSE_KVM_RESCHED
+
+ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /* All GPRs are volatile at this point. */
+ TRECLAIM(R3)
+
+ /* Temporarily store r13 and r9 so we have some regs to play with */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9, PACATMSCRATCH(r13)
+ ld r9, HSTATE_KVM_VCPU(r13)
+
+ /* Get a few more GPRs free. */
+ std r29, VCPU_GPRS_TM(29)(r9)
+ std r30, VCPU_GPRS_TM(30)(r9)
+ std r31, VCPU_GPRS_TM(31)(r9)
+
+ /* Save away PPR and DSCR soon so we don't run with user values. */
+ mfspr r31, SPRN_PPR
+ HMT_MEDIUM
+ mfspr r30, SPRN_DSCR
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+
+ /* Save all but r9, r13 & r29-r31 */
+ reg = 0
+ .rept 29
+ .if (reg != 9) && (reg != 13)
+ std reg, VCPU_GPRS_TM(reg)(r9)
+ .endif
+ reg = reg + 1
+ .endr
+ /* ... now save r13 */
+ GET_SCRATCH0(r4)
+ std r4, VCPU_GPRS_TM(13)(r9)
+ /* ... and save r9 */
+ ld r4, PACATMSCRATCH(r13)
+ std r4, VCPU_GPRS_TM(9)(r9)
+
+ /* Reload stack pointer and TOC. */
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATOC(r13)
+
+ /* Set MSR RI now we have r1 and r13 back. */
+ li r5, MSR_RI
+ mtmsrd r5, 1
+
+ /* Save away checkpointed SPRs. */
+ std r31, VCPU_PPR_TM(r9)
+ std r30, VCPU_DSCR_TM(r9)
+ mflr r5
+ mfcr r6
+ mfctr r7
+ mfspr r8, SPRN_AMR
+ mfspr r10, SPRN_TAR
+ std r5, VCPU_LR_TM(r9)
+ stw r6, VCPU_CR_TM(r9)
+ std r7, VCPU_CTR_TM(r9)
+ std r8, VCPU_AMR_TM(r9)
+ std r10, VCPU_TAR_TM(r9)
+
+ /* Restore r12 as trap number. */
+ lwz r12, VCPU_TRAP(r9)
+
+ /* Save FP/VSX. */
+ addi r3, r9, VCPU_FPRS_TM
+ bl store_fp_state
+ addi r3, r9, VCPU_VRS_TM
+ bl store_vr_state
+ mfspr r6, SPRN_VRSAVE
+ stw r6, VCPU_VRSAVE_TM(r9)
+1:
+ /*
+ * We need to save these SPRs after the treclaim so that the software
+ * error code is recorded correctly in the TEXASR. Also the user may
+ * change these outside of a transaction, so they must always be
+ * context switched.
+ */
+ mfspr r5, SPRN_TFHAR
+ mfspr r6, SPRN_TFIAR
+ mfspr r7, SPRN_TEXASR
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+ std r7, VCPU_TEXASR(r9)
+
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Restore transactional state and TM-related registers.
+ * Called with r4 pointing to the vcpu struct.
+ * This potentially modifies all checkpointed registers.
+ * It restores r1, r2, r4 from the PACA.
+ */
+kvmppc_restore_tm:
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+
+ /* Turn on TM/FP/VSX/VMX so we can restore them. */
+ mfmsr r5
+ li r6, MSR_TM >> 32
+ sldi r6, r6, 32
+ or r5, r5, r6
+ ori r5, r5, MSR_FP
+ oris r5, r5, (MSR_VEC | MSR_VSX)@h
+ mtmsrd r5
+
+ /*
+ * The user may change these outside of a transaction, so they must
+ * always be context switched.
+ */
+ ld r5, VCPU_TFHAR(r4)
+ ld r6, VCPU_TFIAR(r4)
+ ld r7, VCPU_TEXASR(r4)
+ mtspr SPRN_TFHAR, r5
+ mtspr SPRN_TFIAR, r6
+ mtspr SPRN_TEXASR, r7
+
+ ld r5, VCPU_MSR(r4)
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beqlr /* TM not active in guest */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Make sure the failure summary is set, otherwise we'll program check
+ * when we trechkpt. It's possible that this might not have been set
+ * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+ * host.
+ */
+ oris r7, r7, (TEXASR_FS)@h
+ mtspr SPRN_TEXASR, r7
+
+ /*
+ * We need to load up the checkpointed state for the guest.
+ * We need to do this early as it will blow away any GPRs, VSRs and
+ * some SPRs.
+ */
+
+ mr r31, r4
+ addi r3, r31, VCPU_FPRS_TM
+ bl load_fp_state
+ addi r3, r31, VCPU_VRS_TM
+ bl load_vr_state
+ mr r4, r31
+ lwz r7, VCPU_VRSAVE_TM(r4)
+ mtspr SPRN_VRSAVE, r7
+
+ ld r5, VCPU_LR_TM(r4)
+ lwz r6, VCPU_CR_TM(r4)
+ ld r7, VCPU_CTR_TM(r4)
+ ld r8, VCPU_AMR_TM(r4)
+ ld r9, VCPU_TAR_TM(r4)
+ mtlr r5
+ mtcr r6
+ mtctr r7
+ mtspr SPRN_AMR, r8
+ mtspr SPRN_TAR, r9
+
+ /*
+ * Load up PPR and DSCR values but don't put them in the actual SPRs
+ * till the last moment to avoid running with userspace PPR and DSCR for
+ * too long.
+ */
+ ld r29, VCPU_DSCR_TM(r4)
+ ld r30, VCPU_PPR_TM(r4)
+
+ std r2, PACATMSCRATCH(r13) /* Save TOC */
+
+ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /* Load GPRs r0-r28 */
+ reg = 0
+ .rept 29
+ ld reg, VCPU_GPRS_TM(reg)(r31)
+ reg = reg + 1
+ .endr
+
+ mtspr SPRN_DSCR, r29
+ mtspr SPRN_PPR, r30
+
+ /* Load final GPRs */
+ ld 29, VCPU_GPRS_TM(29)(r31)
+ ld 30, VCPU_GPRS_TM(30)(r31)
+ ld 31, VCPU_GPRS_TM(31)(r31)
+
+ /* TM checkpointed state is now setup. All GPRs are now volatile. */
+ TRECHKPT
+
+ /* Now let's get back the state we need. */
+ HMT_MEDIUM
+ GET_PACA(r13)
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+ ld r4, HSTATE_KVM_VCPU(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATMSCRATCH(r13)
+
+ /* Set the MSR RI since we have our registers back. */
+ li r5, MSR_RI
+ mtmsrd r5, 1
+
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+#endif
+
/*
* We come here if we get any exception or interrupt while we are
* executing host real mode code while in guest MMU context.
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 18d2beb89..42b968a85 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -893,7 +893,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq);
-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
/*
* Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index a2e6ef32e..0a2031618 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
}
/*
- * Flush TLB entries for a specific ASCE on all CPUs.
+ * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
+ * when more than one asce (e.g. gmap) has run on this mm.
*/
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cace818d8..313c3b8cf 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
static void gmap_flush_tlb(struct gmap *gmap)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
+ __tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
}
@@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)
/* Flush tlb. */
if (MACHINE_HAS_IDTE)
- __tlb_flush_asce(gmap->mm, gmap->asce);
+ __tlb_flush_idte(gmap->asce);
else
__tlb_flush_global();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9f0ce0e6e..ebb4f8711 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -543,7 +543,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(set_guest_storage_key);
-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
unsigned char key;
spinlock_t *ptl;
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 8acaf4e38..a86d7cc2c 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -15,6 +15,7 @@
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
+#include <um_malloc.h>
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
@@ -32,7 +33,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
struct uml_pt_regs *r;
int save_errno = errno;
- r = malloc(sizeof(struct uml_pt_regs));
+ r = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
if (!r)
panic("out of memory");
@@ -91,7 +92,7 @@ static void timer_real_alarm_handler(mcontext_t *mc)
{
struct uml_pt_regs *regs;
- regs = malloc(sizeof(struct uml_pt_regs));
+ regs = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
if (!regs)
panic("out of memory");
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index e35632ef2..62dfc644c 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
- bool write, bool foreign)
+ bool write, bool execute, bool foreign)
{
/* by default, allow everything */
return true;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 874e8bd64..bd136ac14 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2546,7 +2546,7 @@ void hswep_uncore_cpu_init(void)
static struct intel_uncore_type hswep_uncore_ha = {
.name = "ha",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 2,
.perf_ctr_bits = 48,
SNBEP_UNCORE_PCI_COMMON_INIT(),
@@ -2565,7 +2565,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = {
static struct intel_uncore_type hswep_uncore_imc = {
.name = "imc",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 8,
.perf_ctr_bits = 48,
.fixed_ctr_bits = 48,
@@ -2611,7 +2611,7 @@ static struct intel_uncore_type hswep_uncore_irp = {
static struct intel_uncore_type hswep_uncore_qpi = {
.name = "qpi",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 3,
.perf_ctr_bits = 48,
.perf_ctr = SNBEP_PCI_PMON_CTR0,
@@ -2693,7 +2693,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
static struct intel_uncore_type hswep_uncore_r3qpi = {
.name = "r3qpi",
- .num_counters = 4,
+ .num_counters = 3,
.num_boxes = 3,
.perf_ctr_bits = 44,
.constraints = hswep_uncore_r3qpi_constraints,
@@ -2892,7 +2892,7 @@ static struct intel_uncore_type bdx_uncore_ha = {
static struct intel_uncore_type bdx_uncore_imc = {
.name = "imc",
- .num_counters = 5,
+ .num_counters = 4,
.num_boxes = 8,
.perf_ctr_bits = 48,
.fixed_ctr_bits = 48,
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index c146f3c26..0149ac59c 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
iter->fixed = false;
iter->start_max = iter->start;
+ iter->range = NULL;
iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
__mtrr_lookup_var_next(iter);
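The mtrr.c fix initializes iter->range before handing it to list_prepare_entry(). That macro (quoted from memory from include/linux/list.h) returns pos unchanged when it is non-NULL, and only falls back to the list head when pos is NULL:

#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*(pos)), member))

so a stale iter->range would make the subsequent list_for_each_entry_continue()-style walk resume from a dangling position instead of starting from the head.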
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64a79f271..8326d6891 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8224,6 +8224,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
+ exit_reason != EXIT_REASON_PML_FULL &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
@@ -8854,6 +8855,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
put_cpu();
}
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vcpu_load(vcpu);
+ BUG_ON(r);
+ vmx_load_vmcs01(vcpu);
+ free_nested(vmx);
+ vcpu_put(vcpu);
+}
+
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8862,8 +8879,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
- free_nested(vmx);
+ vmx_free_vcpu_nested(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7da5dd205..fea2c5717 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -91,6 +91,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
struct kvm_x86_ops *kvm_x86_ops __read_mostly;
@@ -5296,13 +5297,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
/* This is a good place to trace that we are exiting SMM. */
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
- if (unlikely(vcpu->arch.smi_pending)) {
- kvm_make_request(KVM_REQ_SMI, vcpu);
- vcpu->arch.smi_pending = 0;
- } else {
- /* Process a latched INIT, if any. */
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- }
+ /* Process a latched INIT or SMI, if any. */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
kvm_mmu_reset_context(vcpu);
@@ -6102,7 +6098,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
}
/* try to inject new event if pending */
- if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+ if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+ vcpu->arch.smi_pending = false;
+ process_smi(vcpu);
+ } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
@@ -6312,11 +6311,6 @@ static void process_smi(struct kvm_vcpu *vcpu)
char buf[512];
u32 cr0;
- if (is_smm(vcpu)) {
- vcpu->arch.smi_pending = true;
- return;
- }
-
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
vcpu->arch.hflags |= HF_SMM_MASK;
memset(buf, 0, 512);
@@ -6379,6 +6373,12 @@ static void process_smi(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
}
+static void process_smi_request(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.smi_pending = true;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
+
void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
@@ -6500,7 +6500,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_SMI, vcpu))
- process_smi(vcpu);
+ process_smi_request(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -6573,8 +6573,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (inject_pending_event(vcpu, req_int_win) != 0)
req_immediate_exit = true;
- /* enable NMI/IRQ window open exits if needed */
else {
+ /* Enable NMI/IRQ window open exits if needed.
+ *
+ * SMIs have two cases: 1) they can be nested, and
+ * then there is nothing to do here because RSM will
+ * cause a vmexit anyway; 2) or the SMI can be pending
+ * because inject_pending_event has completed the
+ * injection of an IRQ or NMI from the previous vmexit,
+ * and then we request an immediate exit to inject the SMI.
+ */
+ if (vcpu->arch.smi_pending && !is_smm(vcpu))
+ req_immediate_exit = true;
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
@@ -6625,8 +6635,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_load_guest_xcr0(vcpu);
- if (req_immediate_exit)
+ if (req_immediate_exit) {
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
smp_send_reschedule(vcpu->cpu);
+ }
trace_kvm_entry(vcpu->vcpu_id);
wait_lapic_expire(vcpu);
@@ -7427,6 +7439,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
vcpu->arch.hflags = 0;
+ vcpu->arch.smi_pending = 0;
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 8b93e634a..ae97f24a4 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -37,6 +37,7 @@
/* Quirks for the listed devices */
#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFL_HSU 0x1191
/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
@@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
/* Special treatment for IRQ0 */
if (dev->irq == 0) {
/*
+ * Skip HS UART common registers device since it has
+ * IRQ0 assigned but is not used by the kernel.
+ */
+ if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU)
+ return -EBUSY;
+ /*
* TNG has IRQ0 assigned to eMMC controller. But there
* are also other devices with bogus PCI configuration
* that have IRQ0 assigned. This check ensures that
- * eMMC gets it.
+ * eMMC gets it. The rest of the devices can still be
+ * enabled without an interrupt line being allocated.
*/
if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
- return -EBUSY;
+ return 0;
}
break;
default:
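The resulting IRQ0 policy in intel_mid_pci_irq_enable(), condensed from the hunk (same identifiers; not a compilable excerpt):

if (dev->irq == 0) {
        if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU)
                return -EBUSY;  /* bogus IRQ0 and unused by the kernel */
        if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
                return 0;       /* enable without an interrupt line    */
        /* the eMMC controller legitimately owns IRQ0: route it below  */
}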