author     André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-10-20 00:10:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-10-20 00:10:27 -0300
commit     d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree       7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /arch/mips
parent     e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnu (pck-4.8.2-gnu)
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig | 50
-rw-r--r--  arch/mips/ath79/clock.c | 2
-rw-r--r--  arch/mips/ath79/setup.c | 6
-rw-r--r--  arch/mips/bmips/setup.c | 4
-rw-r--r--  arch/mips/boot/compressed/decompress.c | 17
-rw-r--r--  arch/mips/boot/compressed/head.S | 16
-rw-r--r--  arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts | 20
-rw-r--r--  arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts | 12
-rw-r--r--  arch/mips/boot/tools/relocs_64.c | 19
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c | 8
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-bootmem.c | 2
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-board.c | 22
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 20
-rw-r--r--  arch/mips/cavium-octeon/octeon-platform.c | 127
-rw-r--r--  arch/mips/cavium-octeon/setup.c | 20
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 1
-rw-r--r--  arch/mips/cobalt/setup.c | 4
-rw-r--r--  arch/mips/configs/ath25_defconfig | 119
-rw-r--r--  arch/mips/configs/cavium_octeon_defconfig | 2
-rw-r--r--  arch/mips/configs/malta_qemu_32r6_defconfig | 2
-rw-r--r--  arch/mips/configs/maltaaprp_defconfig | 2
-rw-r--r--  arch/mips/configs/maltasmvp_eva_defconfig | 2
-rw-r--r--  arch/mips/configs/maltaup_defconfig | 2
-rw-r--r--  arch/mips/configs/rbtx49xx_defconfig | 2
-rw-r--r--  arch/mips/dec/int-handler.S | 40
-rw-r--r--  arch/mips/include/asm/addrspace.h | 2
-rw-r--r--  arch/mips/include/asm/atomic.h | 154
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 4
-rw-r--r--  arch/mips/include/asm/dsemul.h | 92
-rw-r--r--  arch/mips/include/asm/elf.h | 4
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h | 17
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 315
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h | 2
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/irq.h | 2
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/mangle-port.h | 44
-rw-r--r--  arch/mips/include/asm/mips-cm.h | 13
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 23
-rw-r--r--  arch/mips/include/asm/mmu.h | 9
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 6
-rw-r--r--  arch/mips/include/asm/msa.h | 2
-rw-r--r--  arch/mips/include/asm/page.h | 44
-rw-r--r--  arch/mips/include/asm/pci.h | 10
-rw-r--r--  arch/mips/include/asm/pgtable.h | 16
-rw-r--r--  arch/mips/include/asm/processor.h | 18
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 4
-rw-r--r--  arch/mips/include/asm/seccomp.h | 4
-rw-r--r--  arch/mips/include/asm/setup.h | 1
-rw-r--r--  arch/mips/include/asm/signal.h | 6
-rw-r--r--  arch/mips/include/asm/smp.h | 4
-rw-r--r--  arch/mips/include/asm/spinlock.h | 19
-rw-r--r--  arch/mips/include/asm/syscall.h | 2
-rw-r--r--  arch/mips/include/asm/uaccess.h | 2
-rw-r--r--  arch/mips/include/asm/uasm.h | 7
-rw-r--r--  arch/mips/include/asm/uprobes.h | 1
-rw-r--r--  arch/mips/include/uapi/asm/auxvec.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 114
-rw-r--r--  arch/mips/jz4740/setup.c | 10
-rw-r--r--  arch/mips/kernel/Makefile | 2
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 70
-rw-r--r--  arch/mips/kernel/branch.c | 8
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c | 6
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 53
-rw-r--r--  arch/mips/kernel/elf.c | 23
-rw-r--r--  arch/mips/kernel/genex.S | 3
-rw-r--r--  arch/mips/kernel/head.S | 21
-rw-r--r--  arch/mips/kernel/mips-cm.c | 2
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c | 42
-rw-r--r--  arch/mips/kernel/pm-cps.c | 4
-rw-r--r--  arch/mips/kernel/process.c | 14
-rw-r--r--  arch/mips/kernel/ptrace.c | 9
-rw-r--r--  arch/mips/kernel/segment.c | 13
-rw-r--r--  arch/mips/kernel/setup.c | 13
-rw-r--r--  arch/mips/kernel/signal.c | 18
-rw-r--r--  arch/mips/kernel/signal32.c | 288
-rw-r--r--  arch/mips/kernel/signal_o32.c | 285
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 1
-rw-r--r--  arch/mips/kernel/smp-cps.c | 46
-rw-r--r--  arch/mips/kernel/smp.c | 34
-rw-r--r--  arch/mips/kernel/traps.c | 27
-rw-r--r--  arch/mips/kernel/unaligned.c | 10
-rw-r--r--  arch/mips/kernel/uprobes.c | 27
-rw-r--r--  arch/mips/kernel/vdso.c | 10
-rw-r--r--  arch/mips/kvm/Kconfig | 1
-rw-r--r--  arch/mips/kvm/Makefile | 3
-rw-r--r--  arch/mips/kvm/commpage.c | 2
-rw-r--r--  arch/mips/kvm/dyntrans.c | 182
-rw-r--r--  arch/mips/kvm/emulate.c | 547
-rw-r--r--  arch/mips/kvm/entry.c | 701
-rw-r--r--  arch/mips/kvm/fpu.S | 7
-rw-r--r--  arch/mips/kvm/interrupt.c | 12
-rw-r--r--  arch/mips/kvm/interrupt.h | 14
-rw-r--r--  arch/mips/kvm/mips.c | 367
-rw-r--r--  arch/mips/kvm/mmu.c | 395
-rw-r--r--  arch/mips/kvm/stats.c | 21
-rw-r--r--  arch/mips/kvm/tlb.c | 518
-rw-r--r--  arch/mips/kvm/trace.h | 236
-rw-r--r--  arch/mips/kvm/trap_emul.c | 178
-rw-r--r--  arch/mips/lantiq/irq.c | 31
-rw-r--r--  arch/mips/lantiq/prom.c | 4
-rw-r--r--  arch/mips/loongson64/common/dma-swiotlb.c | 10
-rw-r--r--  arch/mips/loongson64/loongson-3/smp.c | 1
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 22
-rw-r--r--  arch/mips/math-emu/dsemul.c | 332
-rw-r--r--  arch/mips/mm/c-r4k.c | 286
-rw-r--r--  arch/mips/mm/dma-default.c | 20
-rw-r--r--  arch/mips/mm/fault.c | 2
-rw-r--r--  arch/mips/mm/init.c | 18
-rw-r--r--  arch/mips/mm/sc-debugfs.c | 4
-rw-r--r--  arch/mips/mm/sc-rm7k.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 12
-rw-r--r--  arch/mips/mm/uasm-micromips.c | 13
-rw-r--r--  arch/mips/mm/uasm-mips.c | 11
-rw-r--r--  arch/mips/mm/uasm.c | 30
-rw-r--r--  arch/mips/mti-malta/malta-dtshim.c | 4
-rw-r--r--  arch/mips/mti-malta/malta-memory.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-setup.c | 10
-rw-r--r--  arch/mips/mti-sead3/sead3-setup.c | 8
-rw-r--r--  arch/mips/net/bpf_jit.c | 6
-rw-r--r--  arch/mips/netlogic/common/nlm-dma.c | 4
-rw-r--r--  arch/mips/oprofile/op_model_loongson3.c | 35
-rw-r--r--  arch/mips/pci/pci.c | 19
-rw-r--r--  arch/mips/pic32/pic32mzda/init.c | 7
-rw-r--r--  arch/mips/pistachio/init.c | 38
-rw-r--r--  arch/mips/ralink/cevt-rt3352.c | 17
-rw-r--r--  arch/mips/ralink/mt7620.c | 2
-rw-r--r--  arch/mips/sgi-ip22/ip22-reset.c | 2
-rw-r--r--  arch/mips/sni/time.c | 1
-rw-r--r--  arch/mips/txx9/generic/pci.c | 2
-rw-r--r--  arch/mips/txx9/generic/setup.c | 2
-rw-r--r--  arch/mips/txx9/rbtx4939/setup.c | 2
-rw-r--r--  arch/mips/xilfpga/init.c | 13
131 files changed, 4262 insertions, 2397 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ac91939b9..212ff9292 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -64,6 +64,8 @@ config MIPS
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
select HANDLE_DOMAIN_IRQ
+ select HAVE_EXIT_THREAD
+ select HAVE_REGS_AND_STACK_ACCESS_API
menu "Machine selection"
@@ -384,7 +386,7 @@ config MACH_PISTACHIO
select CLKSRC_MIPS_GIC
select COMMON_CLK
select CSRC_R4K
- select DMA_MAYBE_COHERENT
+ select DMA_NONCOHERENT
select GPIOLIB
select IRQ_MIPS_CPU
select LIBFDT
@@ -880,7 +882,6 @@ config CAVIUM_OCTEON_SOC
select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_CPU_CAVIUM_OCTEON
- select SWAP_IO_SPACE
select HW_HAS_PCI
select ZONE_DMA32
select HOLES_IN_ZONE
@@ -1111,16 +1112,6 @@ config NEED_DMA_MAP_STATE
config SYS_HAS_EARLY_PRINTK
bool
-config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
- depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
- help
- Say Y here to allow turning CPUs off and on. CPUs can be
- controlled through /sys/devices/system/cpu.
- (Note: power management support will enable this option
- automatically on SMP systems. )
- Say N if you want to disable CPU hotplug.
-
config SYS_SUPPORTS_HOTPLUG_CPU
bool
@@ -1406,7 +1397,6 @@ config CPU_LOONGSON1B
bool "Loongson 1B"
depends on SYS_HAS_CPU_LOONGSON1B
select CPU_LOONGSON1
- select ARCH_WANT_OPTIONAL_GPIOLIB
select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
@@ -1488,6 +1478,7 @@ config CPU_MIPS64_R2
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select CPU_SUPPORTS_MSA
+ select HAVE_KVM
help
Choose this option to build a kernel for release 2 or later of the
MIPS64 architecture. Many modern embedded systems with a 64-bit
@@ -1505,6 +1496,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the
MIPS64 architecture. New MIPS processors, starting with the Warrior
@@ -2634,6 +2626,16 @@ config SMP
If you don't know what to do here, say N.
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+ (Note: power management support will enable this option
+ automatically on SMP systems. )
+ Say N if you want to disable CPU hotplug.
+
config SMP_UP
bool
@@ -2885,10 +2887,10 @@ choice
the documented boot protocol using a device tree.
config MIPS_RAW_APPENDED_DTB
- bool "vmlinux.bin"
+ bool "vmlinux.bin or vmlinuz.bin"
help
With this option, the boot code will look for a device tree binary
- DTB) appended to raw vmlinux.bin (without decompressor).
+ DTB) appended to raw vmlinux.bin or vmlinuz.bin.
(e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb).
This is meant as a backward compatibility convenience for those
@@ -2900,24 +2902,6 @@ choice
look like a DTB header after a reboot if no actual DTB is appended
to vmlinux.bin. Do not leave this option active in a production kernel
if you don't intend to always append a DTB.
-
- config MIPS_ZBOOT_APPENDED_DTB
- bool "vmlinuz.bin"
- depends on SYS_SUPPORTS_ZBOOT
- help
- With this option, the boot code will look for a device tree binary
- DTB) appended to raw vmlinuz.bin (with decompressor).
- (e.g. cat vmlinuz.bin <filename>.dtb > vmlinuz_w_dtb).
-
- This is meant as a backward compatibility convenience for those
- systems with a bootloader that can't be upgraded to accommodate
- the documented boot protocol using a device tree.
-
- Beware that there is very little in terms of protection against
- this option being confused by leftover garbage in memory that might
- look like a DTB header after a reboot if no actual DTB is appended
- to vmlinuz.bin. Do not leave this option active in a production kernel
- if you don't intend to always append a DTB.
endchoice
choice
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 2e7378467..cc3a1e33a 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
struct clk *clk;
clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
- if (!clk)
+ if (IS_ERR(clk))
panic("failed to allocate %s clock structure", name);
return clk;
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 7adab180e..f206dafbb 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
@@ -204,8 +203,8 @@ void __init plat_mem_setup(void)
fdt_start = fw_getenvl("fdt_start");
if (fdt_start)
__dt_setup_arch((void *)KSEG0ADDR(fdt_start));
- else if (fw_arg0 == -2)
- __dt_setup_arch((void *)KSEG0ADDR(fw_arg1));
+ else if (fw_passed_dtb)
+ __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
if (mips_machtype != ATH79_MACH_GENERIC_OF) {
ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
@@ -285,7 +284,6 @@ void __init plat_time_init(void)
static int __init ath79_setup(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
if (mips_machtype == ATH79_MACH_GENERIC_OF)
return 0;
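
The plat_mem_setup() hunk above replaces the open-coded UHI check (fw_arg0 == -2, DTB pointer in fw_arg1) with the new fw_passed_dtb variable declared in the bootinfo.h hunk later in this patch. A hedged sketch of the convention being wrapped (illustrative only; where fw_passed_dtb is actually assigned lies outside this excerpt):

/* UHI boot protocol: firmware passes -2 in a0 and the DTB address in a1. */
unsigned long fw_passed_dtb;

static void __init capture_uhi_dtb(void)
{
	if (fw_arg0 == -2)
		fw_passed_dtb = fw_arg1;
}
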
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index f146d1219..677604267 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -162,8 +162,8 @@ void __init plat_mem_setup(void)
/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
- else if (fw_arg0 == -2) /* UHI interface */
- dtb = (void *)fw_arg1;
+ else if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 080cd53ba..fdf99e9dd 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/libfdt.h>
#include <asm/addrspace.h>
@@ -36,6 +37,8 @@ extern void puthex(unsigned long long val);
#define puthex(val) do {} while (0)
#endif
+extern char __appended_dtb[];
+
void error(char *x)
{
puts("\n\n");
@@ -114,6 +117,20 @@ void decompress_kernel(unsigned long boot_heap_start)
__decompress((char *)zimage_start, zimage_size, 0, 0,
(void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+ if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) &&
+ fdt_magic((void *)&__appended_dtb) == FDT_MAGIC) {
+ unsigned int image_size, dtb_size;
+
+ dtb_size = fdt_totalsize((void *)&__appended_dtb);
+
+ /* last four bytes is always image size in little endian */
+ image_size = le32_to_cpup((void *)&__image_end - 4);
+
+ /* copy dtb to where the booted kernel will expect it */
+ memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
+ __appended_dtb, dtb_size);
+ }
+
/* FIXME: should we flush cache here? */
puts("Now, booting the kernel...\n");
}
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
index c580e853b..409cb483a 100644
--- a/arch/mips/boot/compressed/head.S
+++ b/arch/mips/boot/compressed/head.S
@@ -25,22 +25,6 @@ start:
move s2, a2
move s3, a3
-#ifdef CONFIG_MIPS_ZBOOT_APPENDED_DTB
- PTR_LA t0, __appended_dtb
-#ifdef CONFIG_CPU_BIG_ENDIAN
- li t1, 0xd00dfeed
-#else
- li t1, 0xedfe0dd0
-#endif
- lw t2, (t0)
- bne t1, t2, not_found
- nop
-
- move s1, t0
- PTR_LI s0, -2
-not_found:
-#endif
-
/* Clear BSS */
PTR_LA a0, _edata
PTR_LA a2, _end
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
index d6bc994f7..b134798a0 100644
--- a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -9,6 +9,7 @@
*/
/include/ "octeon_3xxx.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "dlink,dsr-1000n";
@@ -63,12 +64,27 @@
usb1 {
label = "usb1";
- gpios = <&gpio 9 1>; /* Active low */
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
};
usb2 {
label = "usb2";
- gpios = <&gpio 10 1>; /* Active low */
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "wps";
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless1 {
+ label = "5g";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless2 {
+ label = "2.4g";
+ gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
index de61f02d3..ca6b4467b 100644
--- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -388,16 +388,4 @@
usbn = &usbn;
led0 = &led0;
};
-
- dsr1000n-leds {
- compatible = "gpio-leds";
- usb1 {
- label = "usb1";
- gpios = <&gpio 9 1>; /* Active low */
- };
- usb2 {
- label = "usb2";
- gpios = <&gpio 10 1>; /* Active low */
- };
- };
};
diff --git a/arch/mips/boot/tools/relocs_64.c b/arch/mips/boot/tools/relocs_64.c
index b671b5e2d..06066e6ac 100644
--- a/arch/mips/boot/tools/relocs_64.c
+++ b/arch/mips/boot/tools/relocs_64.c
@@ -9,17 +9,20 @@
typedef uint8_t Elf64_Byte;
-typedef struct {
- Elf64_Word r_sym; /* Symbol index. */
- Elf64_Byte r_ssym; /* Special symbol. */
- Elf64_Byte r_type3; /* Third relocation. */
- Elf64_Byte r_type2; /* Second relocation. */
- Elf64_Byte r_type; /* First relocation. */
+typedef union {
+ struct {
+ Elf64_Word r_sym; /* Symbol index. */
+ Elf64_Byte r_ssym; /* Special symbol. */
+ Elf64_Byte r_type3; /* Third relocation. */
+ Elf64_Byte r_type2; /* Second relocation. */
+ Elf64_Byte r_type; /* First relocation. */
+ } fields;
+ Elf64_Xword unused;
} Elf64_Mips_Rela;
#define ELF_CLASS ELFCLASS64
-#define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->r_sym)
-#define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->r_type)
+#define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->fields.r_sym)
+#define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->fields.r_type)
#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 2cd45f5f9..fd69528b2 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -125,7 +125,7 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
direction, attrs);
@@ -135,7 +135,7 @@ static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
}
static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
mb();
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
}
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -189,7 +189,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
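
The prototype changes above track the 4.8 DMA-mapping rework: DMA attributes are now plain DMA_ATTR_* bit flags carried in an unsigned long rather than a struct dma_attrs. A hedged caller-side sketch (not part of this patch) of what that looks like:

#include <linux/dma-mapping.h>

/* Before 4.8 this needed DEFINE_DMA_ATTRS()/dma_set_attr(); now the
 * attribute flags are simply OR-ed together and passed directly. */
static dma_addr_t map_for_device(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				    DMA_ATTR_WEAK_ORDERING);
}
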
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 504ed61a4..b65a6c1ac 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -668,7 +668,7 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
/*
* Round size up to mult of minimum alignment bytes We need
* the actual size allocated to allow for blocks to be
- * coallesced when they are freed. The alloc routine does the
+ * coalesced when they are freed. The alloc routine does the
* same rounding up on all allocations.
*/
size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 36e30d65b..ff49fc045 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -186,15 +186,6 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
return 7 - ipd_port;
else
return -1;
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
- /*
- * Port 2 connects to Broadcom PHY (B5081). Other ports (0-1)
- * connect to a switch (BCM53115).
- */
- if (ipd_port == 2)
- return 8;
- else
- return -1;
case CVMX_BOARD_TYPE_KONTRON_S1901:
if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
return 1;
@@ -289,18 +280,6 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
return result;
}
break;
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
- if (ipd_port == 0 || ipd_port == 1) {
- /* Ports 0 and 1 connect to a switch (BCM53115). */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- } else {
- /* Port 2 uses a Broadcom PHY (B5081). */
- is_broadcom_phy = 1;
- }
- break;
}
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
@@ -765,7 +744,6 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
case CVMX_BOARD_TYPE_LANAI2_G:
case CVMX_BOARD_TYPE_NIC10E_66:
case CVMX_BOARD_TYPE_UBNT_E100:
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
return USB_CLOCK_TYPE_CRYSTAL_12;
case CVMX_BOARD_TYPE_NIC10E:
return USB_CLOCK_TYPE_REF_12;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 368eb4903..c1eb1ff7c 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1260,7 +1260,7 @@ static int octeon_irq_gpio_map(struct irq_domain *d,
line = (hw + gpiod->base_hwirq) >> 6;
bit = (hw + gpiod->base_hwirq) & 63;
- if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+ if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1542,10 +1542,6 @@ static int __init octeon_irq_init_ciu(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
- if (r)
- goto err;
-
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
if (r)
goto err;
@@ -1559,10 +1555,6 @@ static int __init octeon_irq_init_ciu(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
- if (r)
- goto err;
-
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
@@ -1627,6 +1619,12 @@ static int __init octeon_irq_init_gpio(
return -ENOMEM;
}
+ /*
+ * Clear the OF_POPULATED flag that was set by of_irq_init()
+ * so that all GPIO devices will be probed.
+ */
+ of_node_clear_flag(gpio_node, OF_POPULATED);
+
return 0;
}
/*
@@ -2077,10 +2075,6 @@ static int __init octeon_irq_init_ciu2(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
- if (r)
- goto err;
-
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
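
One of the octeon-irq.c hunks above tightens a bounds check: octeon_irq_ciu_to_irq[line] is only valid for line values 0 through ARRAY_SIZE(octeon_irq_ciu_to_irq) - 1, so out-of-range values must be rejected with >=, not >. A minimal standalone sketch of the pattern:

#include <stddef.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static int table[8][64];

static int lookup(size_t line, size_t bit)
{
	if (line >= ARRAY_SIZE(table))	/* '>' would let line == 8 slip through */
		return -1;
	return table[line][bit];
}
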
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 7aeafedff..37a932d91 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,33 +3,27 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2011 Cavium Networks
+ * Copyright (C) 2004-2016 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
-#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/i2c.h>
-#include <linux/usb.h>
-#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <linux/usb/ehci_def.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-rnm-defs.h>
-#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
+#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
+#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
+
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
@@ -78,12 +72,36 @@ static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
+static int __init octeon2_usb_reset(void)
+{
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ u32 ucmd;
+
+ if (!OCTEON_IS_OCTEON2())
+ return 0;
+
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ if (clk_rst_ctl.s.hrst) {
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
+ ucmd &= ~CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ mdelay(2);
+ ucmd |= CMD_RESET;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
+ ucmd |= CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
+ }
+
+ return 0;
+}
+arch_initcall(octeon2_usb_reset);
+
static void octeon2_usb_clocks_start(struct device *dev)
{
u64 div;
union cvmx_uctlx_if_ena if_ena;
union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
- union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
int i;
unsigned long io_clk_64_to_ns;
@@ -131,6 +149,17 @@ static void octeon2_usb_clocks_start(struct device *dev)
if_ena.s.en = 1;
cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+
/* Step 3: Configure the reference clock, PHY, and HCLK */
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
@@ -218,29 +247,10 @@ static void octeon2_usb_clocks_start(struct device *dev)
clk_rst_ctl.s.p_por = 0;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
- /* Step 5: Wait 1 ms for the PHY clock to start. */
- mdelay(1);
+ /* Step 5: Wait 3 ms for the PHY clock to start. */
+ mdelay(3);
- /*
- * Step 6: Program the reset input from automatic test
- * equipment field in the UPHY CSR
- */
- uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
- uphy_ctl_status.s.ate_reset = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /* Step 7: Wait for at least 10ns. */
- ndelay(10);
-
- /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
- uphy_ctl_status.s.ate_reset = 0;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /*
- * Step 9: Wait for at least 20ns for UPHY to output PHY clock
- * signals and OHCI_CLK48
- */
- ndelay(20);
+ /* Steps 6..9 for ATE only, are skipped. */
/* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
/* 10a */
@@ -261,6 +271,20 @@ static void octeon2_usb_clocks_start(struct device *dev)
clk_rst_ctl.s.p_prst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* Step 11b */
+ udelay(1);
+
+ /* Step 11c */
+ clk_rst_ctl.s.p_prst = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 11d */
+ mdelay(1);
+
+ /* Step 11e */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
/* Step 12: Wait 1 uS. */
udelay(1);
@@ -269,21 +293,9 @@ static void octeon2_usb_clocks_start(struct device *dev)
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
end_clock:
- /* Now we can set some other registers. */
-
- for (i = 0; i <= 1; i++) {
- port_ctl_status.u64 =
- cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
- /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
- port_ctl_status.s.txvreftune = 15;
- port_ctl_status.s.txrisetune = 1;
- port_ctl_status.s.txpreemphasistune = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
- port_ctl_status.u64);
- }
-
/* Set uSOF cycle period to 60,000 bits. */
cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+
exit:
mutex_unlock(&octeon2_usb_clocks_mutex);
}
@@ -311,7 +323,11 @@ static struct usb_ehci_pdata octeon_ehci_pdata = {
#ifdef __BIG_ENDIAN
.big_endian_mmio = 1,
#endif
- .dma_mask_64 = 1,
+ /*
+ * We can DMA from anywhere. But the descriptors must be in
+ * the lower 4GB.
+ */
+ .dma_mask_64 = 0,
.power_on = octeon_ehci_power_on,
.power_off = octeon_ehci_power_off,
};
@@ -689,6 +705,10 @@ int __init octeon_prune_device_tree(void)
if (fdt_check_header(initial_boot_params))
panic("Corrupt Device Tree.");
+ WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N,
+ "Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type));
+
aliases = fdt_path_offset(initial_boot_params, "/aliases");
if (aliases < 0) {
pr_err("Error: No /aliases node in device tree.");
@@ -1032,13 +1052,6 @@ end_led:
}
}
- if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) {
- int dsr1000n_leds = fdt_path_offset(initial_boot_params,
- "/dsr1000n-leds");
- if (dsr1000n_leds >= 0)
- fdt_nop_node(initial_boot_params, dsr1000n_leds);
- }
-
return 0;
}
@@ -1046,7 +1059,7 @@ static int __init octeon_publish_devices(void)
{
return of_platform_bus_probe(NULL, octeon_ids, NULL);
}
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 64f852b06..cb16fcc5f 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -40,9 +40,27 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
-#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
+/*
+ * TRUE for devices having registers with little-endian byte
+ * order, FALSE for registers with native-endian byte order.
+ * PCI mandates little-endian, USB and SATA are configuraable,
+ * but we chose little-endian for these.
+ */
+const bool octeon_should_swizzle_table[256] = {
+ [0x00] = true, /* bootbus/CF */
+ [0x1b] = true, /* PCI mmio window */
+ [0x1c] = true, /* PCI mmio window */
+ [0x1d] = true, /* PCI mmio window */
+ [0x1e] = true, /* PCI mmio window */
+ [0x68] = true, /* OCTEON III USB */
+ [0x69] = true, /* OCTEON III USB */
+ [0x6c] = true, /* OCTEON III SATA */
+ [0x6f] = true, /* OCTEON II USB */
+};
+EXPORT_SYMBOL(octeon_should_swizzle_table);
+
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif
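
The new table is consulted by the Octeon port-mangling code (see the mangle-port.h entry in the diffstat above). A minimal sketch of the intended lookup, with the assumption that the index is the top byte of the physical address; the real derivation lives in asm/mach-cavium-octeon/mangle-port.h:

#include <linux/types.h>

extern const bool octeon_should_swizzle_table[256];

/* Illustrative only: the ">> 40" index derivation is an assumption. */
static inline bool octeon_addr_is_little_endian(u64 phys_addr)
{
	return octeon_should_swizzle_table[(phys_addr >> 40) & 0xff];
}
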
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 33aab8925..4d457d602 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -271,6 +271,7 @@ static int octeon_cpu_disable(void)
return -ENOTSUPP;
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
octeon_fixup_irqs();
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index 9a8c2fe8d..c136a18c7 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -42,8 +42,8 @@ const char *get_system_type(void)
/*
* Cobalt doesn't have PS/2 keyboard/mouse interfaces,
- * keyboard conntroller is never used.
- * Also PCI-ISA bridge DMA contoroller is never used.
+ * keyboard controller is never used.
+ * Also PCI-ISA bridge DMA controller is never used.
*/
static struct resource cobalt_reserved_resources[] = {
{ /* dma1 */
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
new file mode 100644
index 000000000..2c829950b
--- /dev/null
+++ b/arch/mips/configs/ath25_defconfig
@@ -0,0 +1,119 @@
+CONFIG_ATH25=y
+# CONFIG_COMPACTION is not set
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_FHANDLE is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_CFI_I2 is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH5K=m
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_LEDS_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index dcac308ce..d470d0836 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -59,6 +59,8 @@ CONFIG_EEPROM_AT25=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_OCTEON=y
CONFIG_PATA_OCTEON_CF=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 7f50dd67a..65f140e1e 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -146,7 +146,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index a9d433a17..799c4338f 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -147,7 +147,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 2774ef064..318460005 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -152,7 +152,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 9bbd2218f..a79107da0 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -146,7 +146,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index f8bf9b4c1..43d55e5ab 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -90,7 +90,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index d7b99180c..1910223a9 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -146,7 +146,25 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,cpu_mask_nr_tbl
+ # open coded PTR_LA t1, cpu_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+ # open coded la t1, cpu_mask_nr_tbl
+ lui t1, %hi(cpu_mask_nr_tbl)
+ addiu t1, %lo(cpu_mask_nr_tbl)
+
+#endif
+#if (_MIPS_SZPTR == 64)
+ # open coded dla t1, cpu_mask_nr_tbl
+ .set push
+ .set noat
+ lui t1, %highest(cpu_mask_nr_tbl)
+ lui AT, %hi(cpu_mask_nr_tbl)
+ daddiu t1, t1, %higher(cpu_mask_nr_tbl)
+ daddiu AT, AT, %lo(cpu_mask_nr_tbl)
+ dsll t1, 32
+ daddu t1, t1, AT
+ .set pop
+#endif
1: lw t2,(t1)
nop
and t2,t0
@@ -195,7 +213,25 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,asic_mask_nr_tbl
+ # open coded PTR_LA t1,asic_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+ # open coded la t1, asic_mask_nr_tbl
+ lui t1, %hi(asic_mask_nr_tbl)
+ addiu t1, %lo(asic_mask_nr_tbl)
+
+#endif
+#if (_MIPS_SZPTR == 64)
+ # open coded dla t1, asic_mask_nr_tbl
+ .set push
+ .set noat
+ lui t1, %highest(asic_mask_nr_tbl)
+ lui AT, %hi(asic_mask_nr_tbl)
+ daddiu t1, t1, %higher(asic_mask_nr_tbl)
+ daddiu AT, AT, %lo(asic_mask_nr_tbl)
+ dsll t1, 32
+ daddu t1, t1, AT
+ .set pop
+#endif
2: lw t2,(t1)
nop
and t2,t0
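
Both open-coded sequences above build a 64-bit table address from four 16-bit fields (%highest, %higher, %hi, %lo) and join the halves with dsll/daddu. Treating the fields as plain unsigned values, the reassembly works out as in this standalone C sketch (the assembler's relocation operators additionally fold in carries to compensate for the sign extension performed by lui and daddiu):

#include <stdint.h>

/* Mirrors the lui/daddiu/dsll/daddu sequence above. */
static uint64_t rebuild_address(uint64_t sym)
{
	uint64_t lo      = sym & 0xffff;		/* %lo(sym)      */
	uint64_t hi      = (sym >> 16) & 0xffff;	/* %hi(sym)      */
	uint64_t higher  = (sym >> 32) & 0xffff;	/* %higher(sym)  */
	uint64_t highest = (sym >> 48) & 0xffff;	/* %highest(sym) */

	uint64_t t1 = (highest << 16) + higher;	/* lui t1, %highest; daddiu t1, t1, %higher */
	uint64_t at = (hi << 16) + lo;		/* lui AT, %hi;      daddiu AT, AT, %lo     */

	return (t1 << 32) + at;			/* dsll t1, 32; daddu t1, t1, AT */
}
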
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 3b0e51d5a..c5b04e752 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -45,7 +45,7 @@
/*
* Returns the kernel segment base of a given address
*/
-#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
+#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
/*
* Returns the physical address of a CKSEGx / XKPHYS address
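
The added cast sign-extends the mask the same way _ACAST32_ sign-extends the address, so that on 64-bit builds the masked result still compares equal to a sign-extended CKSEGx constant. A standalone illustration (the _ACAST32_ below only mimics the kernel's definition for the sake of the example):

#include <stdint.h>
#include <stdio.h>

#define _ACAST32_(x)	((int64_t)(int32_t)(x))	/* sign-extend to 64 bits */

int main(void)
{
	uint64_t a = 0xffffffff80000000ull;	/* sign-extended CKSEG0 address */

	int64_t old_form = _ACAST32_(a) & 0xe0000000;		/* 0x0000000080000000 */
	int64_t new_form = _ACAST32_(a) & _ACAST32_(0xe0000000);	/* 0xffffffff80000000 */

	printf("old=%llx new=%llx\n",
	       (unsigned long long)old_form, (unsigned long long)new_form);
	return 0;
}
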
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 835b402e4..0ab176bdb 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op)
+ ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
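
The new ATOMIC_FETCH_OP/ATOMIC64_FETCH_OP families return the counter value from before the operation, and the MIPS implementations are now provided only in _relaxed form: the generic wrappers in linux/atomic.h add the required barriers around them, which is why the explicit smp_mb__before_llsc()/smp_llsc_mb() calls could be dropped above. A short kernel-context sketch of the resulting semantics (illustrative, not part of this patch):

#include <linux/atomic.h>

static void fetch_vs_return_demo(void)
{
	atomic_t v = ATOMIC_INIT(5);
	int after, before;

	after  = atomic_add_return(3, &v);	/* returns the new value: after == 8, v == 8  */
	before = atomic_fetch_add(3, &v);	/* returns the old value: before == 8, v == 11 */

	(void)after;
	(void)before;
}
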
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 9f6703396..ee9f5f2d1 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -127,6 +127,10 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE];
*/
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+#ifdef CONFIG_USE_OF
+extern unsigned long fw_passed_dtb;
+#endif
+
/*
* Platform memory detection hook called by setup_arch
*/
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
new file mode 100644
index 000000000..a6e067801
--- /dev/null
+++ b/arch/mips/include/asm/dsemul.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_DSEMUL_H__
+#define __MIPS_ASM_DSEMUL_H__
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/* Break instruction with special math emu break code set */
+#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
+
+/* When used as a frame index, indicates the lack of a frame */
+#define BD_EMUFRAME_NONE ((int)BIT(31))
+
+struct mm_struct;
+struct pt_regs;
+struct task_struct;
+
+/**
+ * mips_dsemul() - 'Emulate' an instruction from a branch delay slot
+ * @regs: User thread register context.
+ * @ir: The instruction to be 'emulated'.
+ * @branch_pc: The PC of the branch instruction.
+ * @cont_pc: The PC to continue at following 'emulation'.
+ *
+ * Emulate or execute an arbitrary MIPS instruction within the context of
+ * the current user thread. This is used primarily to handle instructions
+ * in the delay slots of emulated branch instructions, for example FP
+ * branch instructions on systems without an FPU.
+ *
+ * Return: Zero on success, negative if ir is a NOP, signal number on failure.
+ */
+extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc);
+
+/**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp: User thread register context.
+ *
+ * Call in response to the BRK_MEMU break instruction used to return to
+ * the kernel from branch delay slot 'emulation' frames following a call
+ * to mips_dsemul(). Restores the user thread PC to the value that was
+ * passed as the cpc parameter to mips_dsemul().
+ *
+ * Return: True if an emulation frame was returned from, else false.
+ */
+extern bool do_dsemulret(struct pt_regs *xcp);
+
+/**
+ * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame
+ * @tsk: The task structure associated with the thread
+ *
+ * If the thread @tsk has a branch delay slot 'emulation' frame
+ * allocated to it then free that frame.
+ *
+ * Return: True if a frame was freed, else false.
+ */
+extern bool dsemul_thread_cleanup(struct task_struct *tsk);
+
+/**
+ * dsemul_thread_rollback() - Rollback from an 'emulation' frame
+ * @regs: User thread register context.
+ *
+ * If the current thread, whose register context is represented by @regs,
+ * is executing within a delay slot 'emulation' frame then exit that
+ * frame. The PC will be rolled back to the branch if the instruction
+ * that was being 'emulated' has not yet executed, or advanced to the
+ * continuation PC if it has.
+ *
+ * Return: True if a frame was exited, else false.
+ */
+extern bool dsemul_thread_rollback(struct pt_regs *regs);
+
+/**
+ * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
+ * @mm: The struct mm_struct to cleanup state for.
+ *
+ * Cleanup state for the given @mm, ensuring that any memory allocated
+ * for delay slot 'emulation' book-keeping is freed. This is to be called
+ * before @mm is freed in order to avoid memory leaks.
+ */
+extern void dsemul_mm_cleanup(struct mm_struct *mm);
+
+#endif /* __MIPS_ASM_DSEMUL_H__ */
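
As a worked example of the BREAK_MATH() encoding above, assuming BRK_MEMU has the value 514 it carries in asm/break.h:

#include <stdint.h>

#define BRK_MEMU	514	/* assumed value, from asm/break.h */
#define BREAK_MATH(micromips)	(((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))

/*
 * BREAK_MATH(0) == 0x0202000d: a classic-MIPS BREAK instruction whose break
 * code carries BRK_MEMU, which is how the break handler knows to hand the
 * trap to do_dsemulret() when an emulation frame finishes.
 * BREAK_MATH(1) == 0x02020007: the equivalent microMIPS encoding.
 */
static const uint32_t dsemul_break[2] = { BREAK_MATH(0), BREAK_MATH(1) };
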
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index f5f457179..2b3dc2973 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -458,6 +458,7 @@ extern const char *__elf_platform;
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
@@ -498,4 +499,7 @@ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
extern void mips_set_personality_nan(struct arch_elf_state *state);
extern void mips_set_personality_fp(struct arch_elf_state *state);
+#define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk)
+extern int mips_elf_read_implies_exec(void *elf_ex, int exstack);
+
#endif /* _ASM_ELF_H */
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3225c3c07..355dc2517 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -24,7 +24,7 @@
#define _ASM_FPU_EMULATOR_H
#include <linux/sched.h>
-#include <asm/break.h>
+#include <asm/dsemul.h>
#include <asm/thread_info.h>
#include <asm/inst.h>
#include <asm/local.h>
@@ -60,27 +60,16 @@ do { \
#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
#endif /* CONFIG_DEBUG_FS */
-extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
- unsigned long cpc);
-extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
-/*
- * Instruction inserted following the badinst to further tag the sequence
- */
-#define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */
-
-/*
- * Break instruction with special math emu break code set
- */
-#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
-
#define SIGNALLING_NAN 0x7ff800007ff80000LL
static inline void fpu_emulator_init_fpu(void)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 36a391d28..b54bcadd8 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -19,6 +19,9 @@
#include <linux/threads.h>
#include <linux/spinlock.h>
+#include <asm/inst.h>
+#include <asm/mipsregs.h>
+
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -53,6 +56,12 @@
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
#define KVM_MAX_VCPUS 1
@@ -65,8 +74,14 @@
-/* Special address that contains the comm page, used for reducing # of traps */
-#define KVM_GUEST_COMMPAGE_ADDR 0x0
+/*
+ * Special address that contains the comm page, used for reducing # of traps
+ * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
+ * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
+ * caught.
+ */
+#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
+ (0x8000 - PAGE_SIZE))
#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
@@ -93,9 +108,6 @@
#define KVM_INVALID_ADDR 0xdeadbeef
extern atomic_t kvm_mips_instance;
-extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -126,28 +138,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-enum kvm_mips_exit_types {
- WAIT_EXITS,
- CACHE_EXITS,
- SIGNAL_EXITS,
- INT_EXITS,
- COP_UNUSABLE_EXITS,
- TLBMOD_EXITS,
- TLBMISS_LD_EXITS,
- TLBMISS_ST_EXITS,
- ADDRERR_ST_EXITS,
- ADDRERR_LD_EXITS,
- SYSCALL_EXITS,
- RESVD_INST_EXITS,
- BREAK_INST_EXITS,
- TRAP_INST_EXITS,
- MSA_FPE_EXITS,
- FPE_EXITS,
- MSA_DISABLED_EXITS,
- FLUSH_DCACHE_EXITS,
- MAX_KVM_MIPS_EXIT_TYPES
-};
-
struct kvm_arch_memory_slot {
};
@@ -215,73 +205,6 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5
-/* Config0 register bits */
-#define CP0C0_M 31
-#define CP0C0_K23 28
-#define CP0C0_KU 25
-#define CP0C0_MDU 20
-#define CP0C0_MM 17
-#define CP0C0_BM 16
-#define CP0C0_BE 15
-#define CP0C0_AT 13
-#define CP0C0_AR 10
-#define CP0C0_MT 7
-#define CP0C0_VI 3
-#define CP0C0_K0 0
-
-/* Config1 register bits */
-#define CP0C1_M 31
-#define CP0C1_MMU 25
-#define CP0C1_IS 22
-#define CP0C1_IL 19
-#define CP0C1_IA 16
-#define CP0C1_DS 13
-#define CP0C1_DL 10
-#define CP0C1_DA 7
-#define CP0C1_C2 6
-#define CP0C1_MD 5
-#define CP0C1_PC 4
-#define CP0C1_WR 3
-#define CP0C1_CA 2
-#define CP0C1_EP 1
-#define CP0C1_FP 0
-
-/* Config2 Register bits */
-#define CP0C2_M 31
-#define CP0C2_TU 28
-#define CP0C2_TS 24
-#define CP0C2_TL 20
-#define CP0C2_TA 16
-#define CP0C2_SU 12
-#define CP0C2_SS 8
-#define CP0C2_SL 4
-#define CP0C2_SA 0
-
-/* Config3 Register bits */
-#define CP0C3_M 31
-#define CP0C3_ISA_ON_EXC 16
-#define CP0C3_ULRI 13
-#define CP0C3_DSPP 10
-#define CP0C3_LPA 7
-#define CP0C3_VEIC 6
-#define CP0C3_VInt 5
-#define CP0C3_SP 4
-#define CP0C3_MT 2
-#define CP0C3_SM 1
-#define CP0C3_TL 0
-
-/* MMU types, the first four entries have the same layout as the
- CP0C0_MT field. */
-enum mips_mmu_types {
- MMU_TYPE_NONE,
- MMU_TYPE_R4000,
- MMU_TYPE_RESERVED,
- MMU_TYPE_FMT,
- MMU_TYPE_R3000,
- MMU_TYPE_R6000,
- MMU_TYPE_R8000
-};
-
/* Resume Flags */
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -298,11 +221,6 @@ enum emulation_result {
EMULATE_PRIV_FAIL,
};
-#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
-#define MIPS3_PG_V 0x00000002 /* Valid */
-#define MIPS3_PG_NV 0x00000000
-#define MIPS3_PG_D 0x00000004 /* Dirty */
-
#define mips3_paddr_to_tlbpfn(x) \
(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
@@ -313,13 +231,11 @@ enum emulation_result {
#define VPN2_MASK 0xffffe000
#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
-#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
- ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
-#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
- ? ((x).tlb_lo1 & MIPS3_PG_V) \
- : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
+#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
@@ -328,26 +244,23 @@ enum emulation_result {
struct kvm_mips_tlb {
long tlb_mask;
long tlb_hi;
- long tlb_lo0;
- long tlb_lo1;
+ long tlb_lo[2];
};
-#define KVM_MIPS_FPU_FPU 0x1
-#define KVM_MIPS_FPU_MSA 0x2
+#define KVM_MIPS_AUX_FPU 0x1
+#define KVM_MIPS_AUX_MSA 0x2
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
- void *host_ebase, *guest_ebase;
+ void *guest_ebase;
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;
/* Host CP0 registers used when handling exits from guest */
unsigned long host_cp0_badvaddr;
- unsigned long host_cp0_cause;
unsigned long host_cp0_epc;
- unsigned long host_cp0_entryhi;
- uint32_t guest_inst;
+ u32 host_cp0_cause;
/* GPRS */
unsigned long gprs[32];
@@ -357,8 +270,8 @@ struct kvm_vcpu_arch {
/* FPU State */
struct mips_fpu_struct fpu;
- /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
- unsigned int fpu_inuse;
+ /* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
+ unsigned int aux_inuse;
/* COP0 State */
struct mips_coproc *cop0;
@@ -370,11 +283,11 @@ struct kvm_vcpu_arch {
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
- uint32_t count_ctl;
+ u32 count_ctl;
/* Count bias from the raw time */
- uint32_t count_bias;
+ u32 count_bias;
/* Frequency of timer in Hz */
- uint32_t count_hz;
+ u32 count_hz;
/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
s64 count_dyn_bias;
/* Resume time */
@@ -388,7 +301,7 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- unsigned long pending_load_cause;
+ u32 pending_load_cause;
/* Save/Restore the entryhi register when we are preempted/scheduled back in */
unsigned long preempt_entryhi;
@@ -397,8 +310,8 @@ struct kvm_vcpu_arch {
struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
/* Cached guest kernel/user ASIDs */
- uint32_t guest_user_asid[NR_CPUS];
- uint32_t guest_kernel_asid[NR_CPUS];
+ u32 guest_user_asid[NR_CPUS];
+ u32 guest_kernel_asid[NR_CPUS];
struct mm_struct guest_kernel_mm, guest_user_mm;
int last_sched_cpu;
@@ -408,6 +321,7 @@ struct kvm_vcpu_arch {
u8 fpu_enabled;
u8 msa_enabled;
+ u8 kscratch_enabled;
};
@@ -461,6 +375,18 @@ struct kvm_vcpu_arch {
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
+#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
+#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
+#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
+#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
+#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
+#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
+#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
+#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
+#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
+#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
+#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
/*
* Some of the guest registers may be modified asynchronously (e.g. from a
@@ -474,7 +400,7 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -490,7 +416,7 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -507,7 +433,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" or %0, %3 \n"
@@ -542,7 +468,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
- return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+ return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
vcpu->fpu_enabled;
}
@@ -589,9 +515,11 @@ struct kvm_mips_callbacks {
void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
+ unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
+ int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
int (*get_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
@@ -605,8 +533,13 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
-/* Trampoline ASM routine to start running in "Guest" context */
-extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* Building of entry/exception code */
+int kvm_mips_entry_setup(void);
+void *kvm_mips_build_vcpu_run(void *addr);
+void *kvm_mips_build_exception(void *addr, void *handler);
+void *kvm_mips_build_exit(void *addr);
/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
@@ -622,11 +555,11 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */
-uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
-uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
-uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
struct kvm_vcpu *vcpu);
@@ -635,22 +568,24 @@ extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long *hpa0,
- unsigned long *hpa1);
+ struct kvm_mips_tlb *tlb);
-extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+ unsigned long entrylo0,
+ unsigned long entrylo1,
+ int flush_dcache_mask);
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
@@ -667,90 +602,90 @@ extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
/* Emulation */
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
-extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_ri(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
@@ -759,27 +694,27 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
- uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
- uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_store(uint32_t inst,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_load(uint32_t inst,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
@@ -789,13 +724,13 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
/* Dynamic binary translation */
-extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
- struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_cache_index(union mips_instruction inst,
+ u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
/* Misc */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index d68e685cd..bd8b9bbe1 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -55,7 +55,7 @@
#define cpu_has_mipsmt 0
#define cpu_has_vint 0
#define cpu_has_veic 0
-#define cpu_hwrena_impl_bits 0xc0000000
+#define cpu_hwrena_impl_bits (MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2)
#define cpu_has_wsbh 1
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index cceae32a0..64b86b9d3 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -42,8 +42,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER1,
OCTEON_IRQ_TIMER2,
OCTEON_IRQ_TIMER3,
- OCTEON_IRQ_USB0,
- OCTEON_IRQ_USB1,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
index 374eefafb..8ff2cbdf2 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -12,6 +12,14 @@
#ifdef __BIG_ENDIAN
+static inline bool __should_swizzle_bits(volatile void *a)
+{
+ extern const bool octeon_should_swizzle_table[];
+ u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
+
+ return octeon_should_swizzle_table[did];
+}
+
# define __swizzle_addr_b(port) (port)
# define __swizzle_addr_w(port) (port)
# define __swizzle_addr_l(port) (port)
@@ -19,7 +27,9 @@
#else /* __LITTLE_ENDIAN */
-static inline bool __should_swizzle_addr(unsigned long p)
+#define __should_swizzle_bits(a) false
+
+static inline bool __should_swizzle_addr(u64 p)
{
/* boot bus? */
return ((p >> 40) & 0xff) == 0;
@@ -35,40 +45,14 @@ static inline bool __should_swizzle_addr(unsigned long p)
#endif /* __BIG_ENDIAN */
-/*
- * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
- * less sane hardware forces software to fiddle with this...
- *
- * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
- * you can't have the numerical value of data and byte addresses within
- * multibyte quantities both preserved at the same time. Hence two
- * variations of functions: non-prefixed ones that preserve the value
- * and prefixed ones that preserve byte addresses. The latters are
- * typically used for moving raw data between a peripheral and memory (cf.
- * string I/O functions), hence the "__mem_" prefix.
- */
-#if defined(CONFIG_SWAP_IO_SPACE)
# define ioswabb(a, x) (x)
# define __mem_ioswabb(a, x) (x)
-# define ioswabw(a, x) le16_to_cpu(x)
+# define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x)
# define __mem_ioswabw(a, x) (x)
-# define ioswabl(a, x) le32_to_cpu(x)
+# define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x)
# define __mem_ioswabl(a, x) (x)
-# define ioswabq(a, x) le64_to_cpu(x)
+# define ioswabq(a, x) (__should_swizzle_bits(a) ? le64_to_cpu(x) : x)
# define __mem_ioswabq(a, x) (x)
-#else
-
-# define ioswabb(a, x) (x)
-# define __mem_ioswabb(a, x) (x)
-# define ioswabw(a, x) (x)
-# define __mem_ioswabw(a, x) cpu_to_le16(x)
-# define ioswabl(a, x) (x)
-# define __mem_ioswabl(a, x) cpu_to_le32(x)
-# define ioswabq(a, x) (x)
-# define __mem_ioswabq(a, x) cpu_to_le32(x)
-
-#endif
-
#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
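With the CONFIG_SWAP_IO_SPACE split removed, whether 16/32/64-bit accessors byte-swap is now decided per access from bits 47:40 of the address (the Octeon device ID) through octeon_should_swizzle_table[]; on little-endian kernels __should_swizzle_bits() is constant false, so no swap ever happens. A hedged sketch of the effect on a 16-bit read, mirroring the new ioswabw():

static inline u16 example_mmio_read16(volatile u16 *addr)
{
	u16 raw = *addr;	/* raw value from the bus */

	/* Swap only for devices flagged in octeon_should_swizzle_table[] */
	return __should_swizzle_bits((volatile void *)addr)
		? le16_to_cpu(raw) : raw;
}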
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 9411a4c0b..4fafeefe6 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -458,11 +458,22 @@ static inline int mips_cm_revision(void)
static inline unsigned int mips_cm_max_vp_width(void)
{
extern int smp_num_siblings;
+ uint32_t cfg;
if (mips_cm_revision() >= CM_REV_CM3)
return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
- if (config_enabled(CONFIG_SMP))
+ if (mips_cm_present()) {
+ /*
+ * We presume that all cores in the system will have the same
+ * number of VP(E)s, and if that ever changes then this will
+ * need revisiting.
+ */
+ cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+ return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+ }
+
+ if (IS_ENABLED(CONFIG_SMP))
return smp_num_siblings;
return 1;
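Worked example: on a pre-CM3 system whose GCR_Cx_CONFIG reports PVPE = 1 (two VP(E)s per core), mips_cm_max_vp_width() now returns 2 straight from the hardware register instead of relying on smp_num_siblings, which is only meaningful once SMP bring-up has populated it.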
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index e1ca65c62..7dd2dd479 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -53,7 +53,7 @@
#define CP0_SEGCTL2 $5, 4
#define CP0_WIRED $6
#define CP0_INFO $7
-#define CP0_HWRENA $7, 0
+#define CP0_HWRENA $7
#define CP0_BADVADDR $8
#define CP0_BADINSTR $8, 1
#define CP0_COUNT $9
@@ -533,6 +533,7 @@
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_VI (_ULCAST_(1) << 3)
#define MIPS_CONF_MT (_ULCAST_(7) << 7)
#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
@@ -659,8 +660,6 @@
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT (18)
/* WatchLo* register definitions */
#define MIPS_WATCHLO_IRW (_ULCAST_(0x7) << 0)
@@ -853,6 +852,24 @@
#define MIPS_CDMMBASE_ADDR_SHIFT 11
#define MIPS_CDMMBASE_ADDR_START 15
+/* RDHWR register numbers */
+#define MIPS_HWR_CPUNUM 0 /* CPU number */
+#define MIPS_HWR_SYNCISTEP 1 /* SYNCI step size */
+#define MIPS_HWR_CC 2 /* Cycle counter */
+#define MIPS_HWR_CCRES 3 /* Cycle counter resolution */
+#define MIPS_HWR_ULR 29 /* UserLocal */
+#define MIPS_HWR_IMPL1 30 /* Implementation dependent */
+#define MIPS_HWR_IMPL2 31 /* Implementation dependent */
+
+/* Bits in HWREna register */
+#define MIPS_HWRENA_CPUNUM (_ULCAST_(1) << MIPS_HWR_CPUNUM)
+#define MIPS_HWRENA_SYNCISTEP (_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
+#define MIPS_HWRENA_CC (_ULCAST_(1) << MIPS_HWR_CC)
+#define MIPS_HWRENA_CCRES (_ULCAST_(1) << MIPS_HWR_CCRES)
+#define MIPS_HWRENA_ULR (_ULCAST_(1) << MIPS_HWR_ULR)
+#define MIPS_HWRENA_IMPL1 (_ULCAST_(1) << MIPS_HWR_IMPL1)
+#define MIPS_HWRENA_IMPL2 (_ULCAST_(1) << MIPS_HWR_IMPL2)
+
/*
* Bitfields in the TX39 family CP0 Configuration Register 3
*/
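The new MIPS_HWR_*/MIPS_HWRENA_* names give symbolic forms of the RDHWR register numbers and their HWREna enable bits (the Octeon cpu_hwrena_impl_bits override earlier in this series is rewritten in terms of them). A hedged sketch, assuming write_c0_hwrena() from this header, of a trap-init path enabling user-mode RDHWR of the cycle counter and UserLocal:

static void example_configure_hwrena(void)
{
	unsigned int hwrena = cpu_hwrena_impl_bits;

	/* Expose CPU number, cycle counter/resolution and UserLocal */
	hwrena |= MIPS_HWRENA_CPUNUM | MIPS_HWRENA_CC |
		  MIPS_HWRENA_CCRES | MIPS_HWRENA_ULR;

	write_c0_hwrena(hwrena);
}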
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 1afa1f986..f6ba08d77 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -2,11 +2,20 @@
#define __ASM_MMU_H
#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
typedef struct {
unsigned long asid[NR_CPUS];
void *vdso;
atomic_t fp_mode_switching;
+
+ /* lock to be held whilst modifying fp_bd_emupage_allocmap */
+ spinlock_t bd_emupage_lock;
+ /* bitmap tracking allocation of fp_bd_emupage */
+ unsigned long *bd_emupage_allocmap;
+ /* wait queue for threads requiring an emuframe */
+ wait_queue_head_t bd_emupage_queue;
} mm_context_t;
#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index fc57e135c..ddd57ade1 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -16,6 +16,7 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
+#include <asm/dsemul.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -128,6 +129,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
atomic_set(&mm->context.fp_mode_switching, 0);
+ mm->context.bd_emupage_allocmap = NULL;
+ spin_lock_init(&mm->context.bd_emupage_lock);
+ init_waitqueue_head(&mm->context.bd_emupage_queue);
+
return 0;
}
@@ -162,6 +167,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void destroy_context(struct mm_struct *mm)
{
+ dsemul_mm_cleanup(mm);
}
#define deactivate_mm(tsk, mm) do { } while (0)
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index ddf496cb2..8967b475a 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -168,6 +168,7 @@ static inline unsigned int read_msa_##name(void) \
unsigned int reg; \
__asm__ __volatile__( \
" .set push\n" \
+ " .set fp=64\n" \
" .set msa\n" \
" cfcmsa %0, $" #cs "\n" \
" .set pop\n" \
@@ -179,6 +180,7 @@ static inline void write_msa_##name(unsigned int val) \
{ \
__asm__ __volatile__( \
" .set push\n" \
+ " .set fp=64\n" \
" .set msa\n" \
" ctcmsa $" #cs ", %0\n" \
" .set pop\n" \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 21ed7150f..5f9875980 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -162,16 +162,34 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
* __pa()/__va() should be used only during mem init.
*/
-#ifdef CONFIG_64BIT
-#define __pa(x) \
-({ \
- unsigned long __x = (unsigned long)(x); \
- __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \
-})
-#else
-#define __pa(x) \
- ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
-#endif
+static inline unsigned long ___pa(unsigned long x)
+{
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ /*
+ * For MIPS64 the virtual address may either be in one of
+ * the compatibility segments ckseg0 or ckseg1, or it may
+ * be in xkphys.
+ */
+ return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+ }
+
+ if (!IS_ENABLED(CONFIG_EVA)) {
+ /*
+ * We're using the standard MIPS32 legacy memory map, ie.
+ * the address x is going to be in kseg0 or kseg1. We can
+ * handle either case by masking out the desired bits using
+ * CPHYSADDR.
+ */
+ return CPHYSADDR(x);
+ }
+
+ /*
+ * EVA is in use so the memory map could be anything, making it not
+ * safe to just mask out bits.
+ */
+ return x - PAGE_OFFSET + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>
@@ -229,8 +247,10 @@ extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
__virt_addr_valid((const volatile void *) (kaddr))
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
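The open-coded __pa() variants become a single inline covering the three cases commented above. Illustrative values, assuming a 64-bit kernel with the usual CKSEG0/XKPHYS layout:

static void example_pa(void)
{
	/* CKSEG0 address: CPHYSADDR() strips the segment bits */
	unsigned long pa0 = __pa((void *)0xffffffff80100000UL);	/* 0x00100000 */
	/* XKPHYS (cached) address: XPHYSADDR() keeps the low physical bits */
	unsigned long pa1 = __pa((void *)0x9800000012345678UL);	/* 0x12345678 */

	(void)pa0;
	(void)pa1;
}

The VM_DATA_DEFAULT_FLAGS change in the same file makes data/heap executability follow the READ_IMPLIES_EXEC personality bit, which ties in with the mips_elf_read_implies_exec() hook added later in this patch.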
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 86b239d9d..9b63cd412 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -80,16 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, resource_size_t *start,
- resource_size_t *end)
-{
- phys_addr_t size = resource_size(rsrc);
-
- *start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
-}
-
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 7d44e8881..70128d3f7 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -159,7 +159,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* it better already be global)
*/
if (pte_none(*buddy)) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
@@ -172,7 +172,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
htw_stop();
/* Preserve global status for the pair */
- if (config_enabled(CONFIG_XPA)) {
+ if (IS_ENABLED(CONFIG_XPA)) {
if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
null.pte_high = _PAGE_GLOBAL;
} else {
@@ -319,7 +319,7 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
pte.pte_low &= ~_PAGE_WRITE;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
@@ -328,7 +328,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
pte.pte_low &= ~_PAGE_MODIFIED;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
@@ -337,7 +337,7 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkold(pte_t pte)
{
pte.pte_low &= ~_PAGE_ACCESSED;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_READ;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
@@ -347,7 +347,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
{
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
@@ -358,7 +358,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
{
pte.pte_low |= _PAGE_MODIFIED;
if (pte.pte_low & _PAGE_WRITE) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
@@ -369,7 +369,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
if (!(pte.pte_low & _PAGE_NO_READ)) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
}
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 7e78b6208..0d36c87ac 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -11,12 +11,14 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
+#include <asm/dsemul.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
@@ -78,7 +80,11 @@ extern unsigned int vced_count, vcei_count;
#endif
-#define STACK_TOP (TASK_SIZE & PAGE_MASK)
+/*
+ * One page above the stack is used for branch delay slot "emulation".
+ * See dsemul.c for details.
+ */
+#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
/*
* This decides where the kernel will search for a free chunk of vm
@@ -256,6 +262,12 @@ struct thread_struct {
/* Saved fpu/fpu emulator stuff. */
struct mips_fpu_struct fpu FPU_ALIGN;
+ /* Assigned branch delay slot 'emulation' frame */
+ atomic_t bd_emu_frame;
+ /* PC of the branch from a branch delay slot 'emulation' */
+ unsigned long bd_emu_branch_pc;
+ /* PC to continue from following a branch delay slot 'emulation' */
+ unsigned long bd_emu_cont_pc;
#ifdef CONFIG_MIPS_MT_FPAFF
/* Emulated instruction count */
unsigned long emulated_fp;
@@ -323,6 +335,10 @@ struct thread_struct {
* FPU affinity state (null if not FPAFF) \
*/ \
FPAFF_INIT \
+ /* Delay slot emulation */ \
+ .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \
+ .bd_emu_branch_pc = 0, \
+ .bd_emu_cont_pc = 0, \
/* \
* Saved DSP stuff \
*/ \
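For example, with 4 KiB pages STACK_TOP now sits one page below TASK_SIZE rounded down to a page boundary; the page carved out above it is, per the dsemul.c referenced in the comment, the per-mm pool of branch delay slot emulation frames, and the new bd_emu_* fields record which frame a thread currently owns (BD_EMUFRAME_NONE when it owns none) together with the branch and continuation PCs.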
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 38902bf97..667ca3c46 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -210,7 +210,11 @@ static inline void protected_writeback_dcache_line(unsigned long addr)
static inline void protected_writeback_scache_line(unsigned long addr)
{
+#ifdef CONFIG_EVA
+ protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+#else
protected_cache_op(Hit_Writeback_Inv_SD, addr);
+#endif
}
/*
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 684fb3a12..d886d6f76 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -16,10 +16,10 @@ static inline const int *get_compat_mode1_syscalls(void)
0, /* null terminated */
};
- if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+ if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
return syscalls_O32;
- if (config_enabled(CONFIG_MIPS32_N32))
+ if (IS_ENABLED(CONFIG_MIPS32_N32))
return syscalls_N32;
BUG();
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d7bfdeba9..4f5279a83 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -21,6 +21,7 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
+extern unsigned int hwrena;
extern void per_cpu_trap_init(bool);
extern void cpu_cache_init(void);
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 2292373ff..23d6b8015 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -11,7 +11,7 @@
#include <uapi/asm/signal.h>
-#ifdef CONFIG_MIPS32_COMPAT
+#ifdef CONFIG_MIPS32_O32
extern struct mips_abi mips_abi_32;
#define sig_uses_siginfo(ka, abi) \
@@ -19,8 +19,8 @@ extern struct mips_abi mips_abi_32;
((ka)->sa.sa_flags & SA_SIGINFO))
#else
#define sig_uses_siginfo(ka, abi) \
- (config_enabled(CONFIG_64BIT) ? 1 : \
- (config_enabled(CONFIG_TRAD_SIGNALS) ? \
+ (IS_ENABLED(CONFIG_64BIT) ? 1 : \
+ (IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \
((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
#endif
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 03722d432..8bc6c70a4 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,7 +23,7 @@
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
-extern cpumask_t cpu_foreign_map;
+extern cpumask_t cpu_foreign_map[];
#define raw_smp_processor_id() (current_thread_info()->cpu)
@@ -53,6 +53,8 @@ extern cpumask_t cpu_coherent_mask;
extern void asmlinkage smp_bootstrap(void);
+extern void calculate_cpu_foreign_map(void);
+
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196bebe..f485afe51 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <asm/barrier.h>
+#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ u16 owner = READ_ONCE(lock->h.serving_now);
+ smp_rmb();
+ for (;;) {
+ arch_spinlock_t tmp = READ_ONCE(*lock);
+
+ if (tmp.h.serving_now == tmp.h.ticket ||
+ tmp.h.serving_now != owner)
+ break;
+
+ cpu_relax();
+ }
+ smp_acquire__after_ctrl_dep();
+}
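Worked example of the new wait loop: if the sampled serving_now was 5, the loop exits as soon as either the lock reads as free (serving_now == ticket) or serving_now has advanced past 5, i.e. the holder observed at entry has released; smp_acquire__after_ctrl_dep() then upgrades the control dependency to acquire ordering so the caller observes that critical section's stores.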
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 47bc45a67..d87882513 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -99,7 +99,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
int ret;
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((config_enabled(CONFIG_32BIT) ||
+ if ((IS_ENABLED(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
(regs->regs[2] == __NR_syscall))
i++;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b6e20f305..21a2aaba2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -89,7 +89,7 @@ extern u64 __ua_limit;
*/
static inline bool eva_kernel_access(void)
{
- if (!config_enabled(CONFIG_EVA))
+ if (!IS_ENABLED(CONFIG_EVA))
return false;
return segment_eq(get_fs(), get_ds());
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b6ecfeee4..f7929f65f 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -104,8 +104,13 @@ Ip_u1s2(_bltz);
Ip_u1s2(_bltzl);
Ip_u1u2s3(_bne);
Ip_u2s3u1(_cache);
+Ip_u1u2(_cfc1);
+Ip_u2u1(_cfcmsa);
+Ip_u1u2(_ctc1);
+Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
+Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
Ip_u1u2(_divu);
@@ -141,6 +146,8 @@ Ip_u1(_mfhi);
Ip_u1(_mflo);
Ip_u1u2u3(_mtc0);
Ip_u1u2u3(_mthc0);
+Ip_u1(_mthi);
+Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
index 34c325c67..70a4a2f17 100644
--- a/arch/mips/include/asm/uprobes.h
+++ b/arch/mips/include/asm/uprobes.h
@@ -36,7 +36,6 @@ struct arch_uprobe {
unsigned long resume_epc;
u32 insn[2];
u32 ixol[2];
- union mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
};
struct arch_uprobe_task {
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
index c9c719527..45ba259a3 100644
--- a/arch/mips/include/uapi/asm/auxvec.h
+++ b/arch/mips/include/uapi/asm/auxvec.h
@@ -14,4 +14,6 @@
/* Location of VDSO image. */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif /* __ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 8051f9aa1..77429d162 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
enum major_op {
spec_op, bcond_op, j_op, jal_op,
beq_op, bne_op, blez_op, bgtz_op,
- addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
+ addi_op, pop10_op = addi_op, addiu_op, slti_op, sltiu_op,
andi_op, ori_op, xori_op, lui_op,
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
- daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
+ daddi_op, pop30_op = daddi_op, daddiu_op, ldl_op, ldr_op,
spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
- lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+ lld_op, ldc1_op, ldc2_op, pop66_op = ldc2_op, ld_op,
sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
- scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
+ scd_op, sdc1_op, sdc2_op, pop76_op = sdc2_op, sd_op
};
/*
@@ -93,6 +93,50 @@ enum spec3_op {
};
/*
+ * Bits 10-6 minor opcode for r6 spec mult/div encodings
+ */
+enum mult_op {
+ mult_mult_op = 0x0,
+ mult_mul_op = 0x2,
+ mult_muh_op = 0x3,
+};
+enum multu_op {
+ multu_multu_op = 0x0,
+ multu_mulu_op = 0x2,
+ multu_muhu_op = 0x3,
+};
+enum div_op {
+ div_div_op = 0x0,
+ div_div6_op = 0x2,
+ div_mod_op = 0x3,
+};
+enum divu_op {
+ divu_divu_op = 0x0,
+ divu_divu6_op = 0x2,
+ divu_modu_op = 0x3,
+};
+enum dmult_op {
+ dmult_dmult_op = 0x0,
+ dmult_dmul_op = 0x2,
+ dmult_dmuh_op = 0x3,
+};
+enum dmultu_op {
+ dmultu_dmultu_op = 0x0,
+ dmultu_dmulu_op = 0x2,
+ dmultu_dmuhu_op = 0x3,
+};
+enum ddiv_op {
+ ddiv_ddiv_op = 0x0,
+ ddiv_ddiv6_op = 0x2,
+ ddiv_dmod_op = 0x3,
+};
+enum ddivu_op {
+ ddivu_ddivu_op = 0x0,
+ ddivu_ddivu6_op = 0x2,
+ ddivu_dmodu_op = 0x3,
+};
+
+/*
* rt field of bcond opcodes.
*/
enum rt_op {
@@ -103,7 +147,7 @@ enum rt_op {
bltzal_op, bgezal_op, bltzall_op, bgezall_op,
rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
- bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
};
/*
@@ -238,6 +282,21 @@ enum bshfl_func {
};
/*
+ * MSA minor opcodes.
+ */
+enum msa_func {
+ msa_elm_op = 0x19,
+};
+
+/*
+ * MSA ELM opcodes.
+ */
+enum msa_elm {
+ msa_ctc_op = 0x3e,
+ msa_cfc_op = 0x7e,
+};
+
+/*
* func field for MSA MI10 format.
*/
enum msa_mi10_func {
@@ -264,7 +323,7 @@ enum mm_major_op {
mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
- mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+ mm_ori32_op, mm_pool32f_op, mm_pool32s_op, mm_reserved2_op,
mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
@@ -360,7 +419,10 @@ enum mm_32axf_minor_op {
mm_mflo32_op = 0x075,
mm_jalrhb_op = 0x07c,
mm_tlbwi_op = 0x08d,
+ mm_mthi32_op = 0x0b5,
mm_tlbwr_op = 0x0cd,
+ mm_mtlo32_op = 0x0f5,
+ mm_di_op = 0x11d,
mm_jalrs_op = 0x13c,
mm_jalrshb_op = 0x17c,
mm_sync_op = 0x1ad,
@@ -479,6 +541,13 @@ enum mm_32f_73_minor_op {
};
/*
+ * (microMIPS) POOL32S minor opcodes.
+ */
+enum mm_32s_minor_op {
+ mm_32s_elm_op = 0x16,
+};
+
+/*
* (microMIPS) POOL16C minor opcodes.
*/
enum mm_16c_minor_op {
@@ -586,6 +655,36 @@ struct r_format { /* Register format */
;))))))
};
+struct c0r_format { /* C0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int z: 8,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))
+};
+
+struct mfmc0_format { /* MFMC0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int sc : 1,
+ __BITFIELD_FIELD(unsigned int : 2,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))))
+};
+
+struct co_format { /* C0 CO format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int co : 1,
+ __BITFIELD_FIELD(unsigned int code : 19,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))
+};
+
struct p_format { /* Performance counter format (R10000) */
__BITFIELD_FIELD(unsigned int opcode : 6,
__BITFIELD_FIELD(unsigned int rs : 5,
@@ -937,6 +1036,9 @@ union mips_instruction {
struct u_format u_format;
struct c_format c_format;
struct r_format r_format;
+ struct c0r_format c0r_format;
+ struct mfmc0_format mfmc0_format;
+ struct co_format co_format;
struct p_format p_format;
struct f_format f_format;
struct ma_format ma_format;
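The added c0r_format/mfmc0_format/co_format views let emulation code decode coprocessor 0 instructions field by field rather than with masks and shifts. A hedged sketch of recognising MFC0 rt, rd, sel via the new view (mfc_op from the existing cop_op enum in this header):

static bool example_is_mfc0(union mips_instruction inst,
			    unsigned int *rt, unsigned int *rd,
			    unsigned int *sel)
{
	if (inst.c0r_format.opcode != cop0_op ||
	    inst.c0r_format.rs != mfc_op ||
	    inst.c0r_format.z != 0)
		return false;

	*rt = inst.c0r_format.rt;
	*rd = inst.c0r_format.rd;
	*sel = inst.c0r_format.sel;
	return true;
}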
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 510fc0d96..6d0152321 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/prom.h>
@@ -74,16 +73,9 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-static int __init populate_machine(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-arch_initcall(populate_machine);
-
const char *get_system_type(void)
{
- if (config_enabled(CONFIG_MACH_JZ4780))
+ if (IS_ENABLED(CONFIG_MACH_JZ4780))
return "JZ4780";
return "JZ4740";
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index e6053d070..4a603a3ea 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -71,7 +71,7 @@ obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
-obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
+obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 1ea973b2a..fae2f9447 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -339,71 +339,9 @@ void output_pm_defines(void)
}
#endif
-void output_cpuinfo_defines(void)
-{
- COMMENT(" MIPS cpuinfo offsets. ");
- DEFINE(CPUINFO_SIZE, sizeof(struct cpuinfo_mips));
-#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
- OFFSET(CPUINFO_ASID_MASK, cpuinfo_mips, asid_mask);
-#endif
-}
-
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specfic offsets. ");
- DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
- OFFSET(VCPU_RUN, kvm_vcpu, run);
- OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
-
- OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
- OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
-
- OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
- OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
-
- OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
- OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
- OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
- OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
-
- OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
-
- OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
- OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
- OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
- OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
- OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
- OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
- OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
- OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
- OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
- OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
- OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
- OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
- OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
- OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
- OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
- OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
- OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
- OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
- OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
- OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
- OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
- OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
- OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
- OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
- OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
- OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
- OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
- OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
- OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
- OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
- OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
- OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
- OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
- OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
- OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
- BLANK();
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
@@ -441,14 +379,6 @@ void output_kvm_defines(void)
OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();
-
- OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
- OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
- OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
-
- OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
- OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
- BLANK();
}
#ifdef CONFIG_MIPS_CPS
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 6dc3f1fda..46c227fc9 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -790,7 +790,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
- case beqzcjic_op:
+ case pop66_op:
if (!cpu_has_mips_r6) {
ret = -SIGILL;
break;
@@ -798,7 +798,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
- case bnezcjialc_op:
+ case pop76_op:
if (!cpu_has_mips_r6) {
ret = -SIGILL;
break;
@@ -809,8 +809,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
regs->cp0_epc += 8;
break;
#endif
- case cbcond0_op:
- case cbcond1_op:
+ case pop10_op:
+ case pop30_op:
/* Only valid for MIPS R6 */
if (!cpu_has_mips_r6) {
ret = -SIGILL;
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 6392dbe50..a378e4468 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
+int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
static inline void check_daddiu(void)
{
@@ -314,7 +314,7 @@ static inline void check_daddiu(void)
void __init check_bugs64_early(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
check_mult_sh();
check_daddiu();
}
@@ -322,6 +322,6 @@ void __init check_bugs64_early(void)
void __init check_bugs64(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6))
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
check_daddi();
}
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index a88d44247..dd3175442 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -352,7 +352,12 @@ __setup("nohtw", htw_disable);
static int mips_ftlb_disabled;
static int mips_has_ftlb_configured;
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+enum ftlb_flags {
+ FTLB_EN = 1 << 0,
+ FTLB_SET_PROB = 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
static int __init ftlb_disable(char *s)
{
@@ -371,8 +376,6 @@ static int __init ftlb_disable(char *s)
return 1;
}
- back_to_back_c0_hazard();
-
config4 = read_c0_config4();
/* Check that FTLB has been disabled */
@@ -531,7 +534,7 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
return 3;
}
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
{
unsigned int config;
@@ -542,33 +545,33 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
case CPU_P6600:
/* proAptiv & related cores use Config6 to enable the FTLB */
config = read_c0_config6();
- /* Clear the old probability value */
- config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
- if (enable)
- /* Enable FTLB */
- write_c0_config6(config |
- (calculate_ftlb_probability(c)
- << MIPS_CONF6_FTLBP_SHIFT)
- | MIPS_CONF6_FTLBEN);
+
+ if (flags & FTLB_EN)
+ config |= MIPS_CONF6_FTLBEN;
else
- /* Disable FTLB */
- write_c0_config6(config & ~MIPS_CONF6_FTLBEN);
+ config &= ~MIPS_CONF6_FTLBEN;
+
+ if (flags & FTLB_SET_PROB) {
+ config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+ config |= calculate_ftlb_probability(c)
+ << MIPS_CONF6_FTLBP_SHIFT;
+ }
+
+ write_c0_config6(config);
+ back_to_back_c0_hazard();
break;
case CPU_I6400:
- /* I6400 & related cores use Config7 to configure FTLB */
- config = read_c0_config7();
- /* Clear the old probability value */
- config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
- write_c0_config7(config | (calculate_ftlb_probability(c)
- << MIPS_CONF7_FTLBP_SHIFT));
- break;
+ /* There's no way to disable the FTLB */
+ if (!(flags & FTLB_EN))
+ return 1;
+ return 0;
case CPU_LOONGSON3:
/* Flush ITLB, DTLB, VTLB and FTLB */
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
/* Loongson-3 cores use Config6 to enable the FTLB */
config = read_c0_config6();
- if (enable)
+ if (flags & FTLB_EN)
/* Enable FTLB */
write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
else
@@ -788,6 +791,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
PAGE_SIZE, config4);
/* Switch FTLB off */
set_ftlb_enable(c, 0);
+ mips_ftlb_disabled = 1;
break;
}
c->tlbsizeftlbsets = 1 <<
@@ -852,7 +856,7 @@ static void decode_configs(struct cpuinfo_mips *c)
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
/* Enable FTLB if present and not disabled */
- set_ftlb_enable(c, !mips_ftlb_disabled);
+ set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
ok = decode_config0(c); /* Read Config registers. */
BUG_ON(!ok); /* Arch spec violation! */
@@ -902,6 +906,9 @@ static void decode_configs(struct cpuinfo_mips *c)
}
}
+ /* configure the FTLB write probability */
+ set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
mips_probe_watch_registers(c);
#ifndef CONFIG_MIPS_CPS
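The FTLB enable and probability programming are now split: the FTLB is switched on (FTLB_EN) before the Config registers are decoded, and the write probability (FTLB_SET_PROB) is only programmed afterwards, presumably because calculate_ftlb_probability() needs the FTLB set/way counts that decode_config4() fills in. On the I6400, where the FTLB cannot be disabled, a disable request now simply reports failure instead of poking the removed MIPS_CONF7_FTLBP field.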
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 891f5ee63..6430bff21 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -8,9 +8,12 @@
* option) any later version.
*/
+#include <linux/binfmts.h>
#include <linux/elf.h>
+#include <linux/export.h>
#include <linux/sched.h>
+#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
@@ -179,7 +182,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
return -ELIBBAD;
}
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
fp_abi = state->fp_abi;
@@ -285,7 +288,7 @@ void mips_set_personality_fp(struct arch_elf_state *state)
* not be worried about N32/N64 binaries.
*/
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return;
switch (state->overall_fp_mode) {
@@ -326,3 +329,19 @@ void mips_set_personality_nan(struct arch_elf_state *state)
BUG();
}
}
+
+int mips_elf_read_implies_exec(void *elf_ex, int exstack)
+{
+ if (exstack != EXSTACK_DISABLE_X) {
+ /* The binary doesn't request a non-executable stack */
+ return 1;
+ }
+
+ if (!cpu_has_rixi) {
+ /* The CPU doesn't support non-executable memory */
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mips_elf_read_implies_exec);
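In practice this means a binary with no PT_GNU_STACK program header (or one explicitly requesting an executable stack), or any binary running on a CPU without RIXI, keeps the historical read-implies-exec behaviour; only binaries that mark their stack non-executable on RIXI-capable hardware get non-executable data and heap mappings, matching the VM_DATA_DEFAULT_FLAGS change in asm/page.h.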
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 17326a90d..dc0b29612 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -142,9 +142,8 @@ LEAF(__r4k_wait)
PTR_LA k1, __r4k_wait
ori k0, 0x1f /* 32 byte rollback region */
xori k0, 0x1f
- bne k0, k1, 9f
+ bne k0, k1, \handler
MTC0 k0, CP0_EPC
-9:
.set pop
.endm
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 56e8fede3..cf052204e 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -93,21 +93,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
+#ifdef CONFIG_USE_OF
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
- PTR_LA t0, __appended_dtb
+ PTR_LA t2, __appended_dtb
#ifdef CONFIG_CPU_BIG_ENDIAN
li t1, 0xd00dfeed
#else
li t1, 0xedfe0dd0
#endif
- lw t2, (t0)
- bne t1, t2, not_found
- nop
+ lw t0, (t2)
+ beq t0, t1, dtb_found
+#endif
+ li t1, -2
+ beq a0, t1, dtb_found
+ move t2, a1
- move a1, t0
- PTR_LI a0, -2
-not_found:
+ li t2, 0
+dtb_found:
#endif
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
@@ -122,6 +125,10 @@ not_found:
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
+#ifdef CONFIG_USE_OF
+ LONG_S t2, fw_passed_dtb
+#endif
+
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
/* Set the SP after an empty pt_regs. */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 760217bbb..659e6d3ae 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -251,7 +251,7 @@ int mips_cm_probe(void)
mips_cm_probe_l2sync();
/* determine register width for this CM */
- mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+ mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cm_core_lock, cpu));
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index ae7757581..0a7e10b5f 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -84,7 +84,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(s32)MIPSInst_SIMM(ir);
return 0;
case daddiu_op:
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
break;
if (MIPSInst_RT(ir))
@@ -143,7 +143,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case dsll_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -152,7 +152,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case dsrl_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -161,7 +161,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case daddu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -170,7 +170,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u64)regs->regs[MIPSInst_RT(ir)];
return 0;
case dsubu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -283,7 +283,7 @@ static int jr_func(struct pt_regs *regs, u32 ir)
err = mipsr6_emul(regs, nir);
if (err > 0) {
regs->cp0_epc = nepc;
- err = mips_dsemul(regs, nir, cepc);
+ err = mips_dsemul(regs, nir, epc, cepc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -498,7 +498,7 @@ static int dmult_func(struct pt_regs *regs, u32 ir)
s64 res;
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -530,7 +530,7 @@ static int dmultu_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -561,7 +561,7 @@ static int ddiv_func(struct pt_regs *regs, u32 ir)
{
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -586,7 +586,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir)
{
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -825,7 +825,7 @@ static int dclz_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -852,7 +852,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -1033,7 +1033,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1082,7 +1082,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1149,7 +1149,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1486,7 +1486,7 @@ fpu_emul:
break;
case ldl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1605,7 +1605,7 @@ fpu_emul:
break;
case ldr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1724,7 +1724,7 @@ fpu_emul:
break;
case sdl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1842,7 +1842,7 @@ fpu_emul:
break;
case sdr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2074,7 +2074,7 @@ fpu_emul:
break;
case lld_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2135,7 +2135,7 @@ fpu_emul:
break;
case scd_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
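
The mips-r2-to-r6-emul.c changes above are mechanical: config_enabled() becomes IS_ENABLED(), and mips_dsemul() gains the branch EPC as an extra argument. A minimal sketch of the IS_ENABLED() idiom as it is used here (illustrative only, not a hunk from this patch; emulate_doubleword_op() is a hypothetical function):

	/* IS_ENABLED(CONFIG_FOO) is 1 for =y or =m and 0 otherwise, and is a
	 * compile-time constant, so the 64-bit emulation path still
	 * type-checks on 32-bit builds but is discarded as dead code.
	 */
	static int emulate_doubleword_op(struct pt_regs *regs, u32 ir)
	{
		if (IS_ENABLED(CONFIG_32BIT))
			return SIGILL;		/* doubleword ops trap on 32-bit */

		/* 64-bit emulation would go here */
		return 0;
	}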
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index adda3ffb9..5b31a9405 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -148,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
@@ -387,7 +387,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 45cff9fcf..d2d061520 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
@@ -68,11 +69,22 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
+void exit_thread(struct task_struct *tsk)
+{
+ /*
+ * User threads may have allocated a delay slot emulation frame.
+ * If so, clean up that allocation.
+ */
+ if (!(current->flags & PF_KTHREAD))
+ dsemul_thread_cleanup(tsk);
+}
+
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
@@ -159,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
if (clone_flags & CLONE_SETTLS)
ti->tp_value = regs->regs[7];
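
The two atomic_set() calls above establish the invariant that a freshly exec'd or newly copied thread owns no frame in the shared delay-slot emulation page, and exit_thread() releases whatever frame a user thread still holds. A hedged sketch of the ownership test this implies; only bd_emu_frame and BD_EMUFRAME_NONE are taken from the hunks, thread_owns_emuframe() is a hypothetical helper:

	static bool thread_owns_emuframe(struct task_struct *tsk)
	{
		return atomic_read(&tsk->thread.bd_emu_frame) != BD_EMUFRAME_NONE;
	}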
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0dcf69194..6103b24d1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -888,17 +888,16 @@ long arch_ptrace(struct task_struct *child, long request,
*/
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
- long ret = 0;
user_exit();
current_thread_info()->syscall = syscall;
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
- ret = -1;
+ return -1;
+
+ if (secure_computing(NULL) == -1)
+ return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
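
The ptrace.c hunk reorders syscall entry so the ptrace report runs before the seccomp check, and both paths now abort the syscall by returning -1 directly instead of accumulating a local flag. A sketch of the resulting flow, with the behavioural consequence spelled out (this restates the hunk rather than adding new kernel code):

	user_exit();
	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		return -1;			/* tracer cancelled the syscall */

	/* secure_computing(NULL) re-reads the syscall number and arguments
	 * from the current registers, so it filters whatever a tracer may
	 * have rewritten during the report above.
	 */
	if (secure_computing(NULL) == -1)
		return -1;			/* seccomp rejected it */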
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
index 87bc74a5a..2703f2182 100644
--- a/arch/mips/kernel/segment.c
+++ b/arch/mips/kernel/segment.c
@@ -26,17 +26,20 @@ static void build_segment_config(char *str, unsigned int cfg)
/*
* Access modes MK, MSK and MUSK are mapped segments. Therefore
- * there is no direct physical address mapping.
+ * there is no direct physical address mapping unless it becomes
+ * unmapped uncached at error level due to EU.
*/
- if ((am == 0) || (am > 3)) {
+ if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU))
str += sprintf(str, " %03lx",
((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ else
+ str += sprintf(str, " UND");
+
+ if ((am == 0) || (am > 3))
str += sprintf(str, " %01ld",
((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
- } else {
- str += sprintf(str, " UND");
+ else
str += sprintf(str, " U");
- }
/* Exception configuration. */
str += sprintf(str, " %01ld\n",
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index ef408a03e..0d57909d9 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
int x = boot_mem_map.nr_map;
int i;
+ /*
+ * If the region reaches the top of the physical address space, adjust
+ * the size slightly so that (start + size) doesn't overflow
+ */
+ if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+ --size;
+
/* Sanity check */
if (start + size < start) {
pr_warn("Trying to add an invalid memory region, skipped\n");
@@ -757,7 +764,6 @@ static void __init arch_mem_init(char **cmdline_p)
device_tree_init();
sparse_init();
plat_swiotlb_setup();
- paging_init();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Tell bootmem about cma reserved memblock section */
@@ -870,11 +876,16 @@ void __init setup_arch(char **cmdline_p)
prefill_possible_map();
cpu_cache_init();
+ paging_init();
}
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+#ifdef CONFIG_USE_OF
+unsigned long fw_passed_dtb;
+#endif
+
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
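
The new check in add_memory_region() trims one byte from a region that ends exactly at the top of the physical address space; otherwise start + size wraps to a value below start and the existing sanity check rejects a valid region. A standalone userspace illustration (not kernel code) with a 32-bit type standing in for phys_addr_t:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t start = 0x80000000u, size = 0x80000000u; /* ends at 2^32 - 1 */

		if (start + size - 1 == UINT32_MAX)
			--size;				/* same trim as the patch */

		printf("start + size wraps below start? %s\n",
		       (uint32_t)(start + size) < start ? "yes" : "no");
		return 0;
	}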
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index ae4231452..9e224469c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -165,7 +165,7 @@ static int save_msa_extcontext(void __user *buf)
* should already have been done when handling scalar FP
* context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
err = __put_user(read_msa_csr(), &msa->csr);
err |= _save_msa_all_upper(&msa->wr);
@@ -195,7 +195,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
unsigned int csr;
int i, err;
- if (!config_enabled(CONFIG_CPU_HAS_MSA))
+ if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
return SIGSYS;
if (size != sizeof(*msa))
@@ -215,7 +215,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
* scalar FP context, so FPU & MSA should have already been
* disabled whilst handling scalar FP context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
write_msa_csr(csr);
err |= _restore_msa_all_upper(&msa->wr);
@@ -315,7 +315,7 @@ int protected_save_fp_context(void __user *sc)
* EVA does not have userland equivalents of ldc1 or sdc1, so
* save to the kernel FP context & copy that to userland below.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(1);
while (1) {
@@ -378,7 +378,7 @@ int protected_restore_fp_context(void __user *sc)
* disable the FPU here such that the code below simply copies to
* the kernel FP context.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(0);
while (1) {
@@ -772,6 +772,14 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
+ /*
+ * If we were emulating a delay slot instruction, exit that frame such
+ * that addresses in the sigframe are as expected for userland and we
+ * don't have a problem if we reuse the thread's frame for an
+ * instruction within the signal handler.
+ */
+ dsemul_thread_rollback(regs);
+
if (regs->regs[0]) {
switch(regs->regs[2]) {
case ERESTART_RESTARTBLOCK:
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 78c8349d1..97b7c51b8 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -6,129 +6,26 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
*/
-#include <linux/cache.h>
-#include <linux/compat.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/suspend.h>
-#include <linux/compiler.h>
-#include <linux/uaccess.h>
-#include <asm/abi.h>
-#include <asm/asm.h>
+#include <asm/compat.h>
#include <asm/compat-signal.h>
-#include <linux/bitops.h>
-#include <asm/cacheflush.h>
-#include <asm/sim.h>
-#include <asm/ucontext.h>
-#include <asm/fpu.h>
-#include <asm/war.h>
-#include <asm/dsp.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
#include "signal-common.h"
-/*
- * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
- */
-#define __NR_O32_restart_syscall 4253
-
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
-struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
- compat_stack_t uc_stack;
- struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-struct sigframe32 {
- u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_pad[2]; /* Was: signal trampoline */
- struct sigcontext32 sf_sc;
- compat_sigset_t sf_mask;
-};
-
-struct rt_sigframe32 {
- u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_pad[2]; /* Was: signal trampoline */
- compat_siginfo_t rs_info;
- struct ucontext32 rs_uc;
-};
-
-static int setup_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- int i;
-
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
-
- err |= __put_user(0, &sc->sc_regs[0]);
- for (i = 1; i < 32; i++)
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
-
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- }
-
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context(sc);
-
- return err;
-}
-
-static int restore_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- s32 treg;
- int i;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
-
- return err ?: protected_restore_fp_context(sc);
-}
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
@@ -247,176 +144,3 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return 0;
}
-
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
-{
- struct sigframe32 __user *frame;
- sigset_t blocked;
- int sig;
-
- frame = (struct sigframe32 __user *) regs.regs[29];
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
- goto badframe;
-
- set_current_blocked(&blocked);
-
- sig = restore_sigcontext32(&regs, &frame->sf_sc);
- if (sig < 0)
- goto badframe;
- else if (sig)
- force_sig(sig, current);
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29, %0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
-{
- struct rt_sigframe32 __user *frame;
- sigset_t set;
- int sig;
-
- frame = (struct rt_sigframe32 __user *) regs.regs[29];
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
- goto badframe;
-
- set_current_blocked(&set);
-
- sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
- if (sig < 0)
- goto badframe;
- else if (sig)
- force_sig(sig, current);
-
- if (compat_restore_altstack(&frame->rs_uc.uc_stack))
- goto badframe;
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29, %0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-static int setup_frame_32(void *sig_return, struct ksignal *ksig,
- struct pt_regs *regs, sigset_t *set)
-{
- struct sigframe32 __user *frame;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
- if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
- return -EFAULT;
-
- err |= setup_sigcontext32(regs, &frame->sf_sc);
- err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
-
- if (err)
- return -EFAULT;
-
- /*
- * Arguments to signal handler:
- *
- * a0 = signal number
- * a1 = 0 (should be cause)
- * a2 = pointer to struct sigcontext
- *
- * $25 and c0_epc point to the signal handler, $29 points to the
- * struct sigframe.
- */
- regs->regs[ 4] = ksig->sig;
- regs->regs[ 5] = 0;
- regs->regs[ 6] = (unsigned long) &frame->sf_sc;
- regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) sig_return;
- regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
-
- DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
- current->comm, current->pid,
- frame, regs->cp0_epc, regs->regs[31]);
-
- return 0;
-}
-
-static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
- struct pt_regs *regs, sigset_t *set)
-{
- struct rt_sigframe32 __user *frame;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
- if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
- return -EFAULT;
-
- /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
- err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->rs_uc.uc_flags);
- err |= __put_user(0, &frame->rs_uc.uc_link);
- err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
- err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
- err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
-
- if (err)
- return -EFAULT;
-
- /*
- * Arguments to signal handler:
- *
- * a0 = signal number
- * a1 = 0 (should be cause)
- * a2 = pointer to ucontext
- *
- * $25 and c0_epc point to the signal handler, $29 points to
- * the struct rt_sigframe32.
- */
- regs->regs[ 4] = ksig->sig;
- regs->regs[ 5] = (unsigned long) &frame->rs_info;
- regs->regs[ 6] = (unsigned long) &frame->rs_uc;
- regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) sig_return;
- regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
-
- DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
- current->comm, current->pid,
- frame, regs->cp0_epc, regs->regs[31]);
-
- return 0;
-}
-
-/*
- * o32 compatibility on 64-bit kernels, without DSP ASE
- */
-struct mips_abi mips_abi_32 = {
- .setup_frame = setup_frame_32,
- .setup_rt_frame = setup_rt_frame_32,
- .restart = __NR_O32_restart_syscall,
-
- .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
- .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
- .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
-
- .vdso = &vdso_image_o32,
-};
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
new file mode 100644
index 000000000..5e169fc5c
--- /dev/null
+++ b/arch/mips/kernel/signal_o32.c
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/abi.h>
+#include <asm/compat-signal.h>
+#include <asm/dsp.h>
+#include <asm/sim.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
+ */
+#define __NR_O32_restart_syscall 4253
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+ struct sigcontext32 sf_sc;
+ compat_sigset_t sf_mask;
+};
+
+struct ucontext32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext32 uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+};
+
+static int setup_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static int restore_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ s32 treg;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+static int setup_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext32(regs, &frame->sf_sc);
+ err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+{
+ struct rt_sigframe32 __user *frame;
+ sigset_t set;
+ int sig;
+
+ frame = (struct rt_sigframe32 __user *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig, current);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
+
+static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe32.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+struct mips_abi mips_abi_32 = {
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
+ .restart = __NR_O32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
+
+ .vdso = &vdso_image_o32,
+};
+
+
+asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+{
+ struct sigframe32 __user *frame;
+ sigset_t blocked;
+ int sig;
+
+ frame = (struct sigframe32 __user *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig, current);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index e02addc03..6d0f1321e 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -363,6 +363,7 @@ static int bmips_cpu_disable(void)
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
clear_c0_status(IE_IRQ5);
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4ed36f288..6183ad84c 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -46,8 +46,8 @@ static unsigned core_vpe_count(unsigned core)
if (threads_disabled)
return 1;
- if ((!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
- && (!config_enabled(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
+ if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
return 1;
mips_cm_lock_other(core, 0);
@@ -206,7 +206,7 @@ err_out:
}
}
-static void boot_core(unsigned core)
+static void boot_core(unsigned int core, unsigned int vpe_id)
{
u32 access, stat, seq_state;
unsigned timeout;
@@ -233,8 +233,9 @@ static void boot_core(unsigned core)
mips_cpc_lock_other(core);
if (mips_cm_revision() >= CM_REV_CM3) {
- /* Run VP0 following the reset */
- write_cpc_co_vp_run(0x1);
+ /* Run only the requested VP following the reset */
+ write_cpc_co_vp_stop(0xf);
+ write_cpc_co_vp_run(1 << vpe_id);
/*
* Ensure that the VP_RUN register is written before the
@@ -306,7 +307,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(core);
+ boot_core(core, vpe_id);
goto out;
}
@@ -397,6 +398,7 @@ static int cps_cpu_disable(void)
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
return 0;
@@ -411,14 +413,16 @@ static enum {
void play_dead(void)
{
- unsigned cpu, core;
+ unsigned int cpu, core, vpe_id;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
- if (cpu_has_mipsmt) {
+ pr_debug("CPU%d going offline\n", cpu);
+
+ if (cpu_has_mipsmt || cpu_has_vp) {
core = cpu_data[cpu].core;
/* Look for another online VPE within the core */
@@ -439,10 +443,21 @@ void play_dead(void)
complete(&cpu_death_chosen);
if (cpu_death == CPU_DEATH_HALT) {
- /* Halt this TC */
- write_c0_tchalt(TCHALT_H);
- instruction_hazard();
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
} else {
+ pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
@@ -469,6 +484,7 @@ static void wait_for_sibling_halt(void *ptr_cpu)
static void cps_cpu_die(unsigned int cpu)
{
unsigned core = cpu_data[cpu].core;
+ unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned stat;
int err;
@@ -497,10 +513,12 @@ static void cps_cpu_die(unsigned int cpu)
* in which case the CPC will refuse to power down the core.
*/
do {
+ mips_cm_lock_other(core, 0);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
mips_cpc_unlock_other();
+ mips_cm_unlock_other();
} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
@@ -517,6 +535,12 @@ static void cps_cpu_die(unsigned int cpu)
(void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
+ } else if (cpu_has_vp) {
+ do {
+ mips_cm_lock_other(core, vpe_id);
+ stat = read_cpc_co_vp_running();
+ mips_cm_unlock_other();
+ } while (stat & (1 << vpe_id));
}
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index dcf4a23ec..b0baf4895 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(cpu_core_map);
* A logical cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
-cpumask_t cpu_foreign_map __read_mostly;
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
@@ -124,7 +124,7 @@ static inline void set_cpu_core_map(int cpu)
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
-static inline void calculate_cpu_foreign_map(void)
+void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
@@ -141,7 +141,9 @@ static inline void calculate_cpu_foreign_map(void)
cpumask_set_cpu(i, &temp_foreign_map);
}
- cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
}
struct plat_smp_ops *mp_ops;
@@ -343,16 +345,9 @@ asmlinkage void start_secondary(void)
static void stop_this_cpu(void *dummy)
{
/*
- * Remove this CPU. Be a bit slow here and
- * set the bits for every online CPU so we don't miss
- * any IPI whilst taking this VPE down.
+ * Remove this CPU:
*/
- cpumask_copy(&cpu_foreign_map, cpu_online_mask);
-
- /* Make it visible to every other CPU */
- smp_mb();
-
set_cpu_online(smp_processor_id(), false);
calculate_cpu_foreign_map();
local_irq_disable();
@@ -511,10 +506,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
unsigned int cpu;
+ int exec = vma->vm_flags & VM_EXEC;
for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_range() will only fully flush icache if
+ * the VMA is executable, otherwise we must invalidate
+ * ASID without it appearing to has_valid_asid() as if
+ * mm has been completely unused by that CPU.
+ */
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = 0;
+ cpu_context(cpu, mm) = !exec;
}
}
local_flush_tlb_range(vma, start, end);
@@ -559,8 +561,14 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
unsigned int cpu;
for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_page() only does partial flushes, so
+ * invalidate ASID without it appearing to
+ * has_valid_asid() as if mm has been completely unused
+ * by that CPU.
+ */
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
- cpu_context(cpu, vma->vm_mm) = 0;
+ cpu_context(cpu, vma->vm_mm) = 1;
}
}
local_flush_tlb_page(vma, page);
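
cpu_foreign_map becomes a per-CPU array: each CPU's mask now holds one representative VPE from every core except its own, so operations that must reach each foreign core no longer IPI sibling VPEs that already share the caches. A standalone illustration of the andnot computation, with plain bitmasks standing in for cpumask_t:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 4 CPUs, 2 cores: CPUs 0,1 share core 0; CPUs 2,3 share core 1 */
		uint8_t sibling[4] = { 0x3, 0x3, 0xc, 0xc };
		uint8_t first_vpe_per_core = 0x1 | 0x4;	/* CPUs 0 and 2 */

		for (int cpu = 0; cpu < 4; cpu++)
			printf("cpu%d foreign map: 0x%x\n",
			       cpu, first_vpe_per_core & ~sibling[cpu]);
		return 0;	/* prints 0x4, 0x4, 0x1, 0x1 */
	}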
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 4a1712b5a..3de85be24 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -619,17 +619,17 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
- case 0: /* CPU number */
+ case MIPS_HWR_CPUNUM: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
- case 1: /* SYNCI length */
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
- case 2: /* Read count register */
+ case MIPS_HWR_CC: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
- case 3: /* Count register resolution */
+ case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
@@ -639,7 +639,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
regs->regs[rt] = 2;
}
return 0;
- case 29:
+ case MIPS_HWR_ULR: /* Read UserLocal register */
regs->regs[rt] = ti->tp_value;
return 0;
default:
@@ -704,6 +704,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
+ struct vm_area_struct *vma;
switch (sig) {
case 0:
@@ -744,7 +745,8 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
si.si_addr = fault_addr;
si.si_signo = sig;
down_read(&current->mm->mmap_sem);
- if (find_vma(current->mm, (unsigned long)fault_addr))
+ vma = find_vma(current->mm, (unsigned long)fault_addr);
+ if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
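
The find_vma() fix above matters because find_vma() returns the first VMA whose end lies above the given address; that VMA may also start above the address, in which case the address is unmapped and should be reported as SEGV_MAPERR rather than SEGV_ACCERR. A minimal sketch of the containment test the new condition performs (vma_covers() is a hypothetical helper):

	/* find_vma() already guarantees addr < vma->vm_end when it returns
	 * non-NULL, so only the lower bound needs an explicit check.
	 */
	static bool vma_covers(const struct vm_area_struct *vma, unsigned long addr)
	{
		return vma && vma->vm_start <= addr;
	}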
@@ -1859,6 +1861,7 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
+EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
@@ -2063,16 +2066,22 @@ static void configure_status(void)
status_set);
}
+unsigned int hwrena;
+EXPORT_SYMBOL_GPL(hwrena);
+
/* configure HWRENA register */
static void configure_hwrena(void)
{
- unsigned int hwrena = cpu_hwrena_impl_bits;
+ hwrena = cpu_hwrena_impl_bits;
if (cpu_has_mips_r2_r6)
- hwrena |= 0x0000000f;
+ hwrena |= MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
if (!noulri && cpu_has_userlocal)
- hwrena |= (1 << 29);
+ hwrena |= MIPS_HWRENA_ULR;
if (hwrena)
write_c0_hwrena(hwrena);
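
configure_hwrena() now builds the enable mask from named MIPS_HWRENA_* bits instead of the magic 0x0000000f and (1 << 29) it replaces; each bit enables the correspondingly numbered RDHWR register for user mode, matching the MIPS_HWR_* cases added to simulate_rdhwr() above. A standalone check of the values this implies (the bit positions are assumptions consistent with the masks being replaced, not taken from the patch):

	#include <assert.h>

	enum {
		MIPS_HWRENA_CPUNUM    = 1 << 0,	/* assumed bit positions */
		MIPS_HWRENA_SYNCISTEP = 1 << 1,
		MIPS_HWRENA_CC        = 1 << 2,
		MIPS_HWRENA_CCRES     = 1 << 3,
		MIPS_HWRENA_ULR       = 1 << 29,
	};

	int main(void)
	{
		assert((MIPS_HWRENA_CPUNUM | MIPS_HWRENA_SYNCISTEP |
			MIPS_HWRENA_CC | MIPS_HWRENA_CCRES) == 0x0000000f);
		assert(MIPS_HWRENA_ULR == (1 << 29));
		return 0;
	}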
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 28b3af73a..f1c308dbb 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -1025,7 +1025,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHW(addr, value, res);
else
@@ -1044,7 +1044,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadW(addr, value, res);
else
@@ -1063,7 +1063,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHWU(addr, value, res);
else
@@ -1131,7 +1131,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreHW(addr, value, res);
else
@@ -1151,7 +1151,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreW(addr, value, res);
else
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index 8452d933a..4c7c15589 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn)
int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
- union mips_instruction insn;
/*
* Now find the EPC where to resume after the breakpoint has been
@@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
unsigned long epc;
epc = regs->cp0_epc;
- __compute_return_epc_for_insn(regs, insn);
+ __compute_return_epc_for_insn(regs,
+ (union mips_instruction) aup->insn[0]);
aup->resume_epc = regs->cp0_epc;
}
-
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
regs->cp0_epc = current->utask->xol_vaddr;
@@ -222,7 +221,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
return NOTIFY_DONE;
switch (val) {
- case DIE_BREAK:
+ case DIE_UPROBE:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;
@@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
ra = regs->regs[31];
/* Replace the return address with the trampoline address */
- regs->regs[31] = ra;
+ regs->regs[31] = trampoline_vaddr;
return ra;
}
@@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}
-/**
- * set_orig_insn - Restore the original instruction.
- * @mm: the probed process address space.
- * @auprobe: arch specific probepoint information.
- * @vaddr: the virtual address to insert the opcode.
- *
- * For mm @mm, restore the original opcode (opcode) at @vaddr.
- * Return 0 (success) or a negative errno.
- *
- * This overrides the weak version in kernel/events/uprobes.c.
- */
-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
- unsigned long vaddr)
-{
- return uprobe_write_opcode(mm, vaddr,
- *(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
-}
-
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
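
The small change in arch_uretprobe_hijack_return_addr() is a real bug fix: the old code wrote the saved ra straight back into $ra, so returns were never diverted through the uprobe trampoline. A sketch of the intended round trip (restating the corrected lines with their roles spelled out):

	ra = regs->regs[31];			/* real return address */
	regs->regs[31] = trampoline_vaddr;	/* divert the return through the trampoline */
	return ra;				/* the uprobes core restores this later */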
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 0b30c02a5..f9dbfb14a 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -107,6 +107,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
+ /* Map delay slot emulation page */
+ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
+ VM_READ|VM_WRITE|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+
/*
* Determine total area size. This includes the VDSO data itself, the
* data page, and the GIC user page if present. Always create a mapping
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2ae128255..7c56d6b12 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -17,6 +17,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
+ select EXPORT_UASM
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_MMIO
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 637ebbebd..847429de7 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -7,9 +7,10 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
-kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
+kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o fpu.o
+kvm-objs += mmu.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index 2d6e976d1..a36b77e17 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -4,7 +4,7 @@
* for more details.
*
* commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ 0x0.
+ * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index f1527a465..d28089491 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -20,125 +21,114 @@
#include "commpage.h"
-#define SYNCI_TEMPLATE 0x041f0000
-#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
-#define SYNCI_OFFSET ((x) & 0xffff)
+/**
+ * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
+ * @vcpu: Virtual CPU.
+ * @opc: PC of instruction to replace.
+ * @replace: Instruction to write
+ */
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
+ union mips_instruction replace)
+{
+ unsigned long paddr, flags;
+ void *vaddr;
+
+ if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+ (unsigned long)opc);
+ vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
+ vaddr += paddr & ~PAGE_MASK;
+ memcpy(vaddr, (void *)&replace, sizeof(u32));
+ local_flush_icache_range((unsigned long)vaddr,
+ (unsigned long)vaddr + 32);
+ kunmap_atomic(vaddr);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&replace, sizeof(u32));
+ local_flush_icache_range((unsigned long)opc,
+ (unsigned long)opc + 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
-#define LW_TEMPLATE 0x8c000000
-#define CLEAR_TEMPLATE 0x00000020
-#define SW_TEMPLATE 0xac000000
+ return 0;
+}
-int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
- int result = 0;
- unsigned long kseg0_opc;
- uint32_t synci_inst = 0x0;
+ union mips_instruction nop_inst = { 0 };
/* Replace the CACHE instruction, with a NOP */
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
- return result;
+ return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}
/*
* Address based CACHE instructions are transformed into synci(s). A little
* heavy for just D-cache invalidates, but avoids an expensive trap
*/
-int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
- int result = 0;
- unsigned long kseg0_opc;
- uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
-
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- synci_inst |= (base << 21);
- synci_inst |= offset;
-
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
- return result;
+ union mips_instruction synci_inst = { 0 };
+
+ synci_inst.i_format.opcode = bcond_op;
+ synci_inst.i_format.rs = inst.i_format.rs;
+ synci_inst.i_format.rt = synci_op;
+ if (cpu_has_mips_r6)
+ synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
+ else
+ synci_inst.i_format.simmediate = inst.i_format.simmediate;
+
+ return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}
-int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
{
- int32_t rt, rd, sel;
- uint32_t mfc0_inst;
- unsigned long kseg0_opc, flags;
-
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
+ union mips_instruction mfc0_inst = { 0 };
+ u32 rd, sel;
- if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
- mfc0_inst = CLEAR_TEMPLATE;
- mfc0_inst |= ((rt & 0x1f) << 16);
- } else {
- mfc0_inst = LW_TEMPLATE;
- mfc0_inst |= ((rt & 0x1f) << 16);
- mfc0_inst |= offsetof(struct kvm_mips_commpage,
- cop0.reg[rd][sel]);
- }
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
- if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
- } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
- local_irq_restore(flags);
+ if (rd == MIPS_CP0_ERRCTL && sel == 0) {
+ mfc0_inst.r_format.opcode = spec_op;
+ mfc0_inst.r_format.rd = inst.c0r_format.rt;
+ mfc0_inst.r_format.func = add_op;
} else {
- kvm_err("%s: Invalid address: %p\n", __func__, opc);
- return -EFAULT;
+ mfc0_inst.i_format.opcode = lw_op;
+ mfc0_inst.i_format.rt = inst.c0r_format.rt;
+ mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mfc0_inst.i_format.simmediate |= 4;
+#endif
}
- return 0;
+ return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}
-int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
{
- int32_t rt, rd, sel;
- uint32_t mtc0_inst = SW_TEMPLATE;
- unsigned long kseg0_opc, flags;
-
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
-
- mtc0_inst |= ((rt & 0x1f) << 16);
- mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-
- if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
- } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
- local_irq_restore(flags);
- } else {
- kvm_err("%s: Invalid address: %p\n", __func__, opc);
- return -EFAULT;
- }
-
- return 0;
+ union mips_instruction mtc0_inst = { 0 };
+ u32 rd, sel;
+
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ mtc0_inst.i_format.opcode = sw_op;
+ mtc0_inst.i_format.rt = inst.c0r_format.rt;
+ mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mtc0_inst.i_format.simmediate |= 4;
+#endif
+
+ return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}
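
dyntrans.c is rewritten around a single kvm_mips_trans_replace() helper and composes replacement instructions through the union mips_instruction bitfields from asm/inst.h rather than the old shift-and-mask templates. A brief sketch of the field-based style (kernel context assumed; the register and offset values here are arbitrary):

	union mips_instruction lw_inst = { 0 };

	lw_inst.i_format.opcode = lw_op;	/* lw rt, offset(rs) */
	lw_inst.i_format.rs = 0;		/* base register $zero */
	lw_inst.i_format.rt = 7;		/* destination register $a3 */
	lw_inst.i_format.simmediate = 0x100;	/* signed 16-bit offset */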
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 2b42a74ed..43853ec6e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -52,7 +52,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
goto unaligned;
/* Read the instruction */
- insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+ insn.word = kvm_get_inst((u32 *) epc, vcpu);
if (insn.word == KVM_INVALID_INST)
return KVM_INVALID_INST;
@@ -161,9 +161,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
nextpc = epc;
break;
- case blez_op: /* not really i_format */
- case blezl_op:
- /* rt field assumed to be zero */
+ case blez_op: /* POP06 */
+#ifndef CONFIG_CPU_MIPSR6
+ case blezl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] <= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
@@ -171,9 +174,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
nextpc = epc;
break;
- case bgtz_op:
- case bgtzl_op:
- /* rt field assumed to be zero */
+ case bgtz_op: /* POP07 */
+#ifndef CONFIG_CPU_MIPSR6
+ case bgtzl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] > 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
@@ -185,6 +191,40 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
case cop1_op:
kvm_err("%s: unsupported cop1_op\n", __func__);
break;
+
+#ifdef CONFIG_CPU_MIPSR6
+ /* R6 added the following compact branches with forbidden slots */
+ case blezl_op: /* POP26 */
+ case bgtzl_op: /* POP27 */
+ /* only rt == 0 isn't compact branch */
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
+ break;
+ case pop10_op:
+ case pop30_op:
+ /* only rs == rt == 0 is reserved, rest are compact branches */
+ if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
+ goto compact_branch;
+ break;
+ case pop66_op:
+ case pop76_op:
+ /* only rs == 0 isn't compact branch */
+ if (insn.i_format.rs != 0)
+ goto compact_branch;
+ break;
+compact_branch:
+ /*
+ * If we've hit an exception on the forbidden slot, then
+ * the branch must not have been taken.
+ */
+ epc += 8;
+ nextpc = epc;
+ break;
+#else
+compact_branch:
+ /* Compact branches not supported before R6 */
+ break;
+#endif
}
return nextpc;
@@ -198,7 +238,7 @@ sigill:
return nextpc;
}
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long branch_pc;
enum emulation_result er = EMULATE_DONE;
@@ -243,7 +283,7 @@ static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
-static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
s64 now_ns, periods;
u64 delta;
@@ -300,11 +340,11 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*
* Returns: The current value of the guest CP0_Count register.
*/
-static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t expires, threshold;
- uint32_t count, compare;
+ u32 count, compare;
int running;
/* Calculate the biased and scaled guest CP0_Count */
@@ -315,7 +355,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
* Find whether CP0_Count has reached the closest timer interrupt. If
* not, we shouldn't inject it.
*/
- if ((int32_t)(count - compare) < 0)
+ if ((s32)(count - compare) < 0)
return count;
/*
@@ -360,7 +400,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
*
* Returns: The current guest CP0_Count value.
*/
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -387,8 +427,7 @@ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
*
* Returns: The ktime at the point of freeze.
*/
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
- uint32_t *count)
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
ktime_t now;
@@ -419,16 +458,16 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
- ktime_t now, uint32_t count)
+ ktime_t now, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t compare;
+ u32 compare;
u64 delta;
ktime_t expire;
/* Calculate timeout (wrap 0 to 2^32) */
compare = kvm_read_c0_guest_compare(cop0);
- delta = (u64)(uint32_t)(compare - count - 1) + 1;
+ delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
expire = ktime_add_ns(now, delta);
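
The delta computation above converts the distance from the current CP0_Count to CP0_Compare into a tick count; the (compare - count - 1) + 1 form makes compare == count mean a full 2^32-tick period rather than zero, as the "wrap 0 to 2^32" comment says. A standalone check of that formula:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t ticks_until(uint32_t compare, uint32_t count)
	{
		return (uint64_t)(uint32_t)(compare - count - 1) + 1;
	}

	int main(void)
	{
		assert(ticks_until(10, 5) == 5);
		assert(ticks_until(5, 10) == (1ull << 32) - 5);	/* wraps forward */
		assert(ticks_until(7, 7) == 1ull << 32);	/* full period */
		return 0;
	}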
@@ -444,7 +483,7 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
*
* Sets the CP0_Count value and updates the timer accordingly.
*/
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t now;
@@ -538,13 +577,13 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
* If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
* any pending timer interrupt is preserved.
*/
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
ktime_t now;
- uint32_t count;
+ u32 count;
/* if unchanged, must just be an ack */
if (old_compare == compare) {
@@ -585,7 +624,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t count;
+ u32 count;
ktime_t now;
/* Stop hrtimer */
@@ -632,7 +671,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t count;
+ u32 count;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -661,7 +700,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;
- uint32_t count, compare;
+ u32 count, compare;
/* Only allow defined bits to be changed */
if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
@@ -687,7 +726,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
*/
count = kvm_read_c0_guest_count(cop0);
compare = kvm_read_c0_guest_compare(cop0);
- delta = (u64)(uint32_t)(compare - count - 1) + 1;
+ delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC,
vcpu->arch.count_hz);
expire = ktime_add_ns(vcpu->arch.count_resume, delta);
@@ -776,7 +815,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
vcpu->arch.pending_exceptions);
++vcpu->stat.wait_exits;
- trace_kvm_exit(vcpu, WAIT_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) {
vcpu->arch.wait = 1;
kvm_vcpu_block(vcpu);
@@ -801,23 +840,64 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
- kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+ kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
return EMULATE_FAIL;
}
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu: VCPU with changed mappings.
+ * @tlb: TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ int cpu, i;
+ bool user;
+
+ /* No need to flush for entries which are already invalid */
+ if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+ return;
+ /* User address space doesn't need flushing for KSeg2/3 changes */
+ user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+ preempt_disable();
+
+ /*
+ * Probe the shadow host TLB for the entry being overwritten, if one
+ * matches, invalidate it
+ */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+ /* Invalidate the whole ASID on other CPUs */
+ cpu = smp_processor_id();
+ for_each_possible_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (user)
+ vcpu->arch.guest_user_asid[i] = 0;
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ }
+
+ preempt_enable();
+}
+
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int index = kvm_read_c0_guest_index(cop0);
struct kvm_mips_tlb *tlb = NULL;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
kvm_debug("%s: illegal index: %d\n", __func__, index);
- kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
@@ -826,18 +906,15 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
}
tlb = &vcpu->arch.guest_tlb[index];
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
- kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
@@ -851,7 +928,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb *tlb = NULL;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
int index;
get_random_bytes(&index, sizeof(index));
@@ -859,18 +936,14 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
tlb = &vcpu->arch.guest_tlb[index];
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
- kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0));
@@ -882,14 +955,14 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
long entryhi = kvm_read_c0_guest_entryhi(cop0);
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
int index = -1;
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
kvm_write_c0_guest_index(cop0, index);
- kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+ kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
index);
return EMULATE_DONE;
@@ -922,8 +995,8 @@ unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
*/
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
- /* Config4 is optional */
- unsigned int mask = MIPS_CONF_M;
+ /* Config4 and ULRI are optional */
+ unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -942,7 +1015,12 @@ unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
/* Config5 is optional */
- return MIPS_CONF_M;
+ unsigned int mask = MIPS_CONF_M;
+
+ /* KScrExist */
+ mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
+
+ return mask;
}
/**
@@ -973,15 +1051,16 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
- uint32_t cause, struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- int32_t rt, rd, copz, sel, co_bit, op;
- uint32_t pc = vcpu->arch.pc;
+ u32 rt, rd, sel;
unsigned long curr_pc;
+ int cpu, i;
/*
* Update PC and hold onto current PC in case there is
@@ -992,16 +1071,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
if (er == EMULATE_FAIL)
return er;
- copz = (inst >> 21) & 0x1f;
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
- co_bit = (inst >> 25) & 1;
-
- if (co_bit) {
- op = (inst) & 0xff;
-
- switch (op) {
+ if (inst.co_format.co) {
+ switch (inst.co_format.func) {
case tlbr_op: /* Read indexed TLB entry */
er = kvm_mips_emul_tlbr(vcpu);
break;
@@ -1020,47 +1091,58 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
case eret_op:
er = kvm_mips_emul_eret(vcpu);
goto dont_update_pc;
- break;
case wait_op:
er = kvm_mips_emul_wait(vcpu);
break;
}
} else {
- switch (copz) {
+ rt = inst.c0r_format.rt;
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ switch (inst.c0r_format.rs) {
case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
/* Get reg */
if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+ vcpu->arch.gprs[rt] =
+ (s32)kvm_mips_read_count(vcpu);
} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
} else {
- vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+ vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
}
- kvm_debug
- ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
- pc, rd, sel, rt, vcpu->arch.gprs[rt]);
-
+ trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
break;
case dmfc_op:
vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
break;
case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
+ trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+
if ((rd == MIPS_CP0_TLB_INDEX)
&& (vcpu->arch.gprs[rt] >=
KVM_MIPS_GUEST_TLB_SIZE)) {
@@ -1078,19 +1160,26 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- uint32_t nasid =
+ u32 nasid =
vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
((kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID) != nasid)) {
- kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+ trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
- & KVM_ENTRYHI_ASID,
- vcpu->arch.gprs[rt]
- & KVM_ENTRYHI_ASID);
+ & KVM_ENTRYHI_ASID,
+ nasid);
+ preempt_disable();
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
+ cpu = smp_processor_id();
+ for_each_possible_cpu(i)
+ if (i != cpu) {
+ vcpu->arch.guest_user_asid[i] = 0;
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ }
+ preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]);
@@ -1100,10 +1189,6 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
goto done;
} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
- kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
- pc, kvm_read_c0_guest_compare(cop0),
- vcpu->arch.gprs[rt]);
-
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
kvm_mips_write_compare(vcpu,
@@ -1155,7 +1240,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
@@ -1166,7 +1251,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* the near future.
*/
if (change & ST0_CU1 &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_status(ST0_CU1, val);
preempt_enable();
@@ -1201,7 +1286,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
/*
@@ -1211,7 +1296,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* quickly enabled again in the near future.
*/
if (change & MIPS_CONF5_MSAEN &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
change_c0_config5(MIPS_CONF5_MSAEN,
val);
@@ -1219,7 +1304,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
- uint32_t old_cause, new_cause;
+ u32 old_cause, new_cause;
old_cause = kvm_read_c0_guest_cause(cop0);
new_cause = vcpu->arch.gprs[rt];
@@ -1233,20 +1318,30 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
else
kvm_mips_count_enable_cause(vcpu);
}
+ } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
+ u32 mask = MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
+
+ if (kvm_read_c0_guest_config3(cop0) &
+ MIPS_CONF3_ULRI)
+ mask |= MIPS_HWRENA_ULR;
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
} else {
cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
}
-
- kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
- rd, sel, cop0->reg[rd][sel]);
break;
case dmtc_op:
kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
vcpu->arch.pc, rt, rd, sel);
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
er = EMULATE_FAIL;
break;
@@ -1258,7 +1353,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0);
/* EI */
- if (inst & 0x20) {
+ if (inst.mfmc0_format.sc) {
kvm_debug("[%#lx] mfmc0_op: EI\n",
vcpu->arch.pc);
kvm_set_c0_guest_status(cop0, ST0_IE);
@@ -1272,9 +1367,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
case wrpgpr_op:
{
- uint32_t css =
- cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
- uint32_t pss =
+ u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+ u32 pss =
(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
/*
* We don't support any shadow register sets, so
@@ -1291,7 +1385,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
break;
default:
kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
- vcpu->arch.pc, copz);
+ vcpu->arch.pc, inst.c0r_format.rs);
er = EMULATE_FAIL;
break;
}
@@ -1312,13 +1406,14 @@ dont_update_pc:
return er;
}
-enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
- int32_t op, base, rt, offset;
- uint32_t bytes;
+ u32 rt;
+ u32 bytes;
void *data = run->mmio.data;
unsigned long curr_pc;
@@ -1331,12 +1426,9 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
if (er == EMULATE_FAIL)
return er;
- rt = (inst >> 16) & 0x1f;
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- op = (inst >> 26) & 0x3f;
+ rt = inst.i_format.rt;
- switch (op) {
+ switch (inst.i_format.opcode) {
case sb_op:
bytes = 1;
if (bytes > sizeof(run->mmio.data)) {
@@ -1357,7 +1449,7 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
*(u8 *) data = vcpu->arch.gprs[rt];
kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
- *(uint8_t *) data);
+ *(u8 *) data);
break;
@@ -1379,11 +1471,11 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- *(uint32_t *) data = vcpu->arch.gprs[rt];
+ *(u32 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(uint32_t *) data);
+ vcpu->arch.gprs[rt], *(u32 *) data);
break;
case sh_op:
@@ -1404,15 +1496,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- *(uint16_t *) data = vcpu->arch.gprs[rt];
+ *(u16 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(uint32_t *) data);
+ vcpu->arch.gprs[rt], *(u32 *) data);
break;
default:
- kvm_err("Store not yet supported");
+ kvm_err("Store not yet supported (inst=0x%08x)\n",
+ inst.word);
er = EMULATE_FAIL;
break;
}
@@ -1424,18 +1517,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
return er;
}
-enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
- struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
- int32_t op, base, rt, offset;
- uint32_t bytes;
+ u32 op, rt;
+ u32 bytes;
- rt = (inst >> 16) & 0x1f;
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- op = (inst >> 26) & 0x3f;
+ rt = inst.i_format.rt;
+ op = inst.i_format.opcode;
vcpu->arch.pending_load_cause = cause;
vcpu->arch.io_gpr = rt;
@@ -1521,7 +1612,8 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
break;
default:
- kvm_err("Load not yet supported");
+ kvm_err("Load not yet supported (inst=0x%08x)\n",
+ inst.word);
er = EMULATE_FAIL;
break;
}
@@ -1529,40 +1621,15 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
return er;
}
-int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
-{
- unsigned long offset = (va & ~PAGE_MASK);
- struct kvm *kvm = vcpu->kvm;
- unsigned long pa;
- gfn_t gfn;
- kvm_pfn_t pfn;
-
- gfn = va >> PAGE_SHIFT;
-
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- return -1;
- }
- pfn = kvm->arch.guest_pmap[gfn];
- pa = (pfn << PAGE_SHIFT) | offset;
-
- kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
- CKSEG0ADDR(pa));
-
- local_flush_icache_range(CKSEG0ADDR(pa), 32);
- return 0;
-}
-
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- int32_t offset, cache, op_inst, op, base;
+ u32 cache, op_inst, op, base;
+ s16 offset;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long va;
unsigned long curr_pc;
@@ -1576,9 +1643,12 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
if (er == EMULATE_FAIL)
return er;
- base = (inst >> 21) & 0x1f;
- op_inst = (inst >> 16) & 0x1f;
- offset = (int16_t)inst;
+ base = inst.i_format.rs;
+ op_inst = inst.i_format.rt;
+ if (cpu_has_mips_r6)
+ offset = inst.spec3_format.simmediate;
+ else
+ offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
@@ -1640,7 +1710,6 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
(cop0) & KVM_ENTRYHI_ASID));
if (index < 0) {
- vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
@@ -1665,8 +1734,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* We fault an entry from the guest tlb to the
* shadow host TLB
*/
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL, NULL)) {
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
read_c0_entryhi());
@@ -1725,20 +1793,20 @@ dont_update_pc:
return er;
}
-enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
- uint32_t inst;
/* Fetch the instruction. */
if (cause & CAUSEF_BD)
opc += 1;
- inst = kvm_get_inst(opc, vcpu);
+ inst.word = kvm_get_inst(opc, vcpu);
- switch (((union mips_instruction)inst).r_format.opcode) {
+ switch (inst.r_format.opcode) {
case cop0_op:
er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
break;
@@ -1755,15 +1823,31 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
break;
+#ifndef CONFIG_CPU_MIPSR6
case cache_op:
++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, CACHE_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
break;
+#else
+ case spec3_op:
+ switch (inst.spec3_format.func) {
+ case cache6_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_mips_emulate_cache(inst, opc, cause, run,
+ vcpu);
+ break;
+ default:
+ goto unknown;
+ };
+ break;
+unknown:
+#endif
default:
kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
- inst);
+ inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL;
break;
@@ -1772,8 +1856,8 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
return er;
}
-enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1807,8 +1891,8 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1853,8 +1937,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1899,8 +1983,8 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1943,8 +2027,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1988,7 +2072,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2016,8 +2100,8 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
return er;
}
-enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2059,8 +2143,8 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2088,8 +2172,8 @@ enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2123,8 +2207,8 @@ enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2158,8 +2242,8 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2193,8 +2277,8 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2228,8 +2312,8 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2263,8 +2347,8 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2298,22 +2382,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
return er;
}
-/* ll/sc, rdhwr, sync emulation */
-
-#define OPCODE 0xfc000000
-#define BASE 0x03e00000
-#define RT 0x001f0000
-#define OFFSET 0x0000ffff
-#define LL 0xc0000000
-#define SC 0xe0000000
-#define SPEC0 0x00000000
-#define SPEC3 0x7c000000
-#define RD 0x0000f800
-#define FUNC 0x0000003f
-#define SYNC 0x0000000f
-#define RDHWR 0x0000003b
-
-enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2321,7 +2390,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
- uint32_t inst;
+ union mips_instruction inst;
/*
* Update PC and hold onto current PC in case there is
@@ -2336,17 +2405,22 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
if (cause & CAUSEF_BD)
opc += 1;
- inst = kvm_get_inst(opc, vcpu);
+ inst.word = kvm_get_inst(opc, vcpu);
- if (inst == KVM_INVALID_INST) {
+ if (inst.word == KVM_INVALID_INST) {
kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
return EMULATE_FAIL;
}
- if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+ if (inst.r_format.opcode == spec3_op &&
+ inst.r_format.func == rdhwr_op &&
+ inst.r_format.rs == 0 &&
+ (inst.r_format.re >> 3) == 0) {
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
- int rd = (inst & RD) >> 11;
- int rt = (inst & RT) >> 16;
+ int rd = inst.r_format.rd;
+ int rt = inst.r_format.rt;
+ int sel = inst.r_format.re & 0x7;
+
/* If usermode, check RDHWR rd is allowed by guest HWREna */
if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
@@ -2354,17 +2428,17 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
goto emulate_ri;
}
switch (rd) {
- case 0: /* CPU number */
- arch->gprs[rt] = 0;
+ case MIPS_HWR_CPUNUM: /* CPU number */
+ arch->gprs[rt] = vcpu->vcpu_id;
break;
- case 1: /* SYNCI length */
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
break;
- case 2: /* Read count register */
- arch->gprs[rt] = kvm_mips_read_count(vcpu);
+ case MIPS_HWR_CC: /* Read count register */
+ arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
break;
- case 3: /* Count register resolution */
+ case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
@@ -2374,7 +2448,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
arch->gprs[rt] = 2;
}
break;
- case 29:
+ case MIPS_HWR_ULR: /* Read UserLocal register */
arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
break;
@@ -2382,8 +2456,12 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
goto emulate_ri;
}
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
+ vcpu->arch.gprs[rt]);
} else {
- kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ kvm_debug("Emulate RI not supported @ %p: %#x\n",
+ opc, inst.word);
goto emulate_ri;
}
@@ -2416,19 +2494,19 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 4:
- *gpr = *(int32_t *) run->mmio.data;
+ *gpr = *(s32 *) run->mmio.data;
break;
case 2:
if (vcpu->mmio_needed == 2)
- *gpr = *(int16_t *) run->mmio.data;
+ *gpr = *(s16 *) run->mmio.data;
else
- *gpr = *(uint16_t *)run->mmio.data;
+ *gpr = *(u16 *)run->mmio.data;
break;
case 1:
if (vcpu->mmio_needed == 2)
- *gpr = *(int8_t *) run->mmio.data;
+ *gpr = *(s8 *) run->mmio.data;
else
*gpr = *(u8 *) run->mmio.data;
break;
@@ -2443,12 +2521,12 @@ done:
return er;
}
-static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
- uint32_t *opc,
+static enum emulation_result kvm_mips_emulate_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
@@ -2481,13 +2559,13 @@ static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
@@ -2577,18 +2655,18 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this
* case we inject the TLB from the Guest TLB into the shadow host TLB
*/
-enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long va = vcpu->arch.host_cp0_badvaddr;
int index;
- kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
- vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+ kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
+ vcpu->arch.host_cp0_badvaddr);
/*
* KVM would not have got the exception if this entry was valid in the
@@ -2631,13 +2709,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
}
} else {
kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
- tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+ tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
/*
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL, NULL)) {
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
read_c0_entryhi());
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
new file mode 100644
index 000000000..6a02b3a3f
--- /dev/null
+++ b/arch/mips/kvm/entry.c
@@ -0,0 +1,701 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Generation of main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ *
+ * Copyright (C) 2016 Imagination Technologies Ltd.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/msa.h>
+#include <asm/setup.h>
+#include <asm/uasm.h>
+
+/* Register names */
+#define ZERO 0
+#define AT 1
+#define V0 2
+#define V1 3
+#define A0 4
+#define A1 5
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+#define T0 12
+#define T1 13
+#define T2 14
+#define T3 15
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#define S0 16
+#define S1 17
+#define T9 25
+#define K0 26
+#define K1 27
+#define GP 28
+#define SP 29
+#define RA 31
+
+/* Some CP0 registers */
+#define C0_HWRENA 7, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_STATUS 12, 0
+#define C0_CAUSE 13, 0
+#define C0_EPC 14, 0
+#define C0_EBASE 15, 1
+#define C0_CONFIG5 16, 5
+#define C0_DDATA_LO 28, 3
+#define C0_ERROREPC 30, 0
+
+#define CALLFRAME_SIZ 32
+
+#ifdef CONFIG_64BIT
+#define ST0_KX_IF_64 ST0_KX
+#else
+#define ST0_KX_IF_64 0
+#endif
+
+static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
+static unsigned int scratch_tmp[2] = { C0_ERROREPC };
+
+enum label_id {
+ label_fpu_1 = 1,
+ label_msa_1,
+ label_return_to_host,
+ label_kernel_asid,
+ label_exit_common,
+};
+
+UASM_L_LA(_fpu_1)
+UASM_L_LA(_msa_1)
+UASM_L_LA(_return_to_host)
+UASM_L_LA(_kernel_asid)
+UASM_L_LA(_exit_common)
+
+static void *kvm_mips_build_enter_guest(void *addr);
+static void *kvm_mips_build_ret_from_exit(void *addr);
+static void *kvm_mips_build_ret_to_guest(void *addr);
+static void *kvm_mips_build_ret_to_host(void *addr);
+
+/**
+ * kvm_mips_entry_setup() - Perform global setup for entry code.
+ *
+ * Perform global setup for entry code, such as choosing a scratch register.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+int kvm_mips_entry_setup(void)
+{
+ /*
+ * We prefer to use KScratchN registers if they are available over the
+ * defaults above, which may not work on all cores.
+ */
+ unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
+
+ /* Pick a scratch register for storing VCPU */
+ if (kscratch_mask) {
+ scratch_vcpu[0] = 31;
+ scratch_vcpu[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_vcpu[1]);
+ }
+
+ /* Pick a scratch register to use as a temp for saving state */
+ if (kscratch_mask) {
+ scratch_tmp[0] = 31;
+ scratch_tmp[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_tmp[1]);
+ }
+
+ return 0;
+}
+
+static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /* Save the VCPU scratch register value in cp0_epc of the stack frame */
+ UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+
+ /* Save the temp scratch register value in cp0_cause of stack frame */
+ if (scratch_tmp[0] == 31) {
+ UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ }
+}
+
+static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /*
+ * Restore host scratch register values saved by
+ * kvm_mips_build_save_scratch().
+ */
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+ UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+
+ if (scratch_tmp[0] == 31) {
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ }
+}
+
+/**
+ * build_set_exc_base() - Assemble code to write exception base address.
+ * @p: Code buffer pointer.
+ * @reg: Source register (generated code may set WG bit in @reg).
+ *
+ * Assemble code to modify the exception base address in the EBase register,
+ * using the appropriately sized access and setting the WG bit if necessary.
+ */
+static inline void build_set_exc_base(u32 **p, unsigned int reg)
+{
+ if (cpu_has_ebase_wg) {
+ /* Set WG so that all the bits get written */
+ uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
+ UASM_i_MTC0(p, reg, C0_EBASE);
+ } else {
+ uasm_i_mtc0(p, reg, C0_EBASE);
+ }
+}
+
+/**
+ * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the start of the vcpu_run function to run a guest VCPU. The function
+ * conforms to the following prototype:
+ *
+ * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ *
+ * The exit from the guest and return to the caller is handled by the code
+ * generated by kvm_mips_build_ret_to_host().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_vcpu_run(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /*
+ * A0: run
+ * A1: vcpu
+ */
+
+ /* k0/k1 not being used in host kernel context */
+ UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
+ for (i = 16; i < 32; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Save host status */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+ UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
+
+ /* Save scratch registers, will be used to store pointer to vcpu etc */
+ kvm_mips_build_save_scratch(&p, V1, K1);
+
+ /* VCPU scratch register has pointer to vcpu */
+ UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Offset into vcpu->arch */
+ UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Save the host stack to VCPU, used for exception processing
+ * when we exit from the Guest
+ */
+ UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Save the kernel gp as well */
+ UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /*
+ * Setup status register for running the guest in UM, interrupts
+ * are disabled
+ */
+ UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* load up the new EBASE */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+ build_set_exc_base(&p, K0);
+
+ /*
+ * Now that the new EBASE has been loaded, unset BEV, set
+ * interrupt mask as it was but make sure that timer interrupts
+ * are enabled
+ */
+ uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
+ uasm_i_andi(&p, V0, V0, ST0_IM);
+ uasm_i_or(&p, K0, K0, V0);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to resume guest execution. This code is common between the
+ * initial entry into the guest from the host, and returning from the exit
+ * handler back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_enter_guest(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Set Guest EPC */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
+ UASM_i_MTC0(&p, T0, C0_EPC);
+
+ /* Set the ASID for the Guest Kernel */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
+ UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
+ T0);
+ uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
+ uasm_i_xori(&p, T0, T0, KSU_USER);
+ uasm_il_bnez(&p, &r, T0, label_kernel_asid);
+ UASM_i_ADDIU(&p, T1, K1,
+ offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
+ /* else user */
+ UASM_i_ADDIU(&p, T1, K1,
+ offsetof(struct kvm_vcpu_arch, guest_user_asid));
+ uasm_l_kernel_asid(&l, p);
+
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ /* smp_processor_id */
+ uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
+ /* x4 */
+ uasm_i_sll(&p, T2, T2, 2);
+ UASM_i_ADDU(&p, T3, T1, T2);
+ uasm_i_lw(&p, K0, 0, T3);
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ /* x sizeof(struct cpuinfo_mips)/4 */
+ uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
+ uasm_i_mul(&p, T2, T2, T3);
+
+ UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
+ UASM_i_ADDU(&p, AT, AT, T2);
+ UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
+ uasm_i_and(&p, K0, K0, T2);
+#else
+ uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
+#endif
+ uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+ uasm_i_ehb(&p);
+
+ /* Disable RDHWR access */
+ uasm_i_mtc0(&p, ZERO, C0_HWRENA);
+
+ /* load the guest context from VCPU and return */
+ for (i = 1; i < 32; ++i) {
+ /* Guest k0/k1 loaded later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* Restore hi/lo */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
+ uasm_i_mthi(&p, K0);
+
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
+ uasm_i_mtlo(&p, K0);
+#endif
+
+ /* Restore the guest's k0/k1 registers */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Jump to guest */
+ uasm_i_eret(&p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exception() - Assemble first level guest exception handler.
+ * @addr: Address to start writing code.
+ * @handler: Address of common handler (within range of @addr).
+ *
+ * Assemble exception vector code for guest execution. The generated vector will
+ * branch to the common exception handler generated by kvm_mips_build_exit().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exception(void *addr, void *handler)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Save guest k1 into scratch register */
+ UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+ /* Get the VCPU pointer from the VCPU scratch register */
+ UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /* Save guest k0 into VCPU structure */
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+
+ /* Branch to the common handler */
+ uasm_il_b(&p, &r, label_exit_common);
+ uasm_i_nop(&p);
+
+ uasm_l_exit_common(&l, handler);
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exit() - Assemble common guest exit handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the generic guest exit handling code. This is called by the
+ * exception vectors (generated by kvm_mips_build_exception()), and calls
+ * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
+ * depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exit(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[3];
+ struct uasm_reloc relocs[3];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ * Both k0/k1 registers will have already been saved (k0 into the vcpu
+ * structure, and k1 into the scratch_tmp register).
+ *
+ * The k1 register will already contain the kvm_vcpu_arch pointer.
+ */
+
+ /* Start saving Guest context to VCPU */
+ for (i = 0; i < 32; ++i) {
+ /* Guest k0/k1 saved later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* We need to save hi/lo and restore them on the way out */
+ uasm_i_mfhi(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
+
+ uasm_i_mflo(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
+#endif
+
+ /* Finally save guest k1 to VCPU */
+ uasm_i_ehb(&p);
+ UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+ uasm_i_move(&p, S1, A1);
+
+ /* Restore run (vcpu->run) */
+ UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
+ /* Save pointer to run in s0, will be saved by the compiler */
+ uasm_i_move(&p, S0, A0);
+
+ /*
+ * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
+ * the exception
+ */
+ UASM_i_MFC0(&p, K0, C0_EPC);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
+
+ UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
+ K1);
+
+ uasm_i_mfc0(&p, K0, C0_CAUSE);
+ uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+
+ /* Now restore the host state just enough to run the handlers */
+
+ /* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V0, AT);
+
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ UASM_i_LA_mostly(&p, K0, (long)&ebase);
+ UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
+ build_set_exc_base(&p, K0);
+
+ if (raw_cpu_has_fpu) {
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later
+ * ctc1's don't trigger FPE for pending exceptions.
+ */
+ uasm_i_lui(&p, AT, ST0_CU1 >> 16);
+ uasm_i_and(&p, V1, V0, AT);
+ uasm_il_beqz(&p, &r, V1, label_fpu_1);
+ uasm_i_nop(&p);
+ uasm_i_cfc1(&p, T0, 31);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
+ K1);
+ uasm_i_ctc1(&p, ZERO, 31);
+ uasm_l_fpu_1(&l, p);
+ }
+
+ if (cpu_has_msa) {
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ uasm_i_mfc0(&p, T0, C0_CONFIG5);
+ uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
+ uasm_il_beqz(&p, &r, T0, label_msa_1);
+ uasm_i_nop(&p);
+ uasm_i_cfcmsa(&p, T0, MSA_CSR);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+ K1);
+ uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+ uasm_l_msa_1(&l, p);
+ }
+
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+ uasm_i_and(&p, V0, V0, AT);
+ uasm_i_lui(&p, AT, ST0_CU0 >> 16);
+ uasm_i_or(&p, V0, V0, AT);
+ uasm_i_mtc0(&p, V0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* Load up host GP */
+ UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /* Need a stack before we can jump to "C" */
+ UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Saved host state */
+ UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
+
+ /*
+ * XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
+
+ /* Restore host scratch registers, as we'll have clobbered them */
+ kvm_mips_build_restore_scratch(&p, K0, SP);
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Jump to handler */
+ /*
+ * XXXKYMA: not sure if this is safe, how large is the stack??
+ * Now jump to the kvm_mips_handle_exit() to see if we can deal
+ * with this in the kernel
+ */
+ UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
+ uasm_i_jalr(&p, RA, T9);
+ UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ p = kvm_mips_build_ret_from_exit(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle the return from kvm_mips_handle_exit(), either
+ * resuming the guest or returning to the host depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_from_exit(void *addr)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Return from handler. Make sure interrupts are disabled */
+ uasm_i_di(&p, ZERO);
+ uasm_i_ehb(&p);
+
+ /*
+ * XXXKYMA: k0/k1 could have been blown away if we processed
+ * an exception while we were handling the exception from the
+ * guest, reload k1
+ */
+
+ uasm_i_move(&p, K1, S1);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Check return value, should tell us if we are returning to the
+ * host (handle I/O etc.) or resuming the guest
+ */
+ uasm_i_andi(&p, T0, V0, RESUME_HOST);
+ uasm_il_bnez(&p, &r, T0, label_return_to_host);
+ uasm_i_nop(&p);
+
+ p = kvm_mips_build_ret_to_guest(p);
+
+ uasm_l_return_to_host(&l, p);
+ p = kvm_mips_build_ret_to_host(p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_guest(void *addr)
+{
+ u32 *p = addr;
+
+ /* Put the saved pointer to vcpu (s1) back into the scratch register */
+ UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+
+ /* Switch EBASE back to the one used by KVM */
+ uasm_i_mfc0(&p, V1, C0_STATUS);
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V1, AT);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+ build_set_exc_base(&p, T0);
+
+ /* Setup status register for running guest in UM */
+ uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
+ UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
+ uasm_i_and(&p, V1, V1, AT);
+ uasm_i_mtc0(&p, V1, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
+ * function generated by kvm_mips_build_vcpu_run().
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_host(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /* EBASE is already pointing to Linux */
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+ UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
+
+ /*
+ * r2/v0 is the return code, shift it down by 2 (arithmetic)
+ * to recover the err code
+ */
+ uasm_i_sra(&p, K0, V0, 2);
+ uasm_i_move(&p, V0, K0);
+
+ /* Load context saved on the host stack */
+ for (i = 16; i < 31; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Restore RA, which is the address we will return to */
+ UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
+ uasm_i_jr(&p, RA);
+ uasm_i_nop(&p);
+
+ return p;
+}
+
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
index 531fbf513..16f17c639 100644
--- a/arch/mips/kvm/fpu.S
+++ b/arch/mips/kvm/fpu.S
@@ -14,13 +14,16 @@
#include <asm/mipsregs.h>
#include <asm/regdef.h>
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
.set noreorder
.set noat
LEAF(__kvm_save_fpu)
.set push
- .set mips64r2
SET_HARDFLOAT
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
@@ -63,8 +66,8 @@ LEAF(__kvm_save_fpu)
LEAF(__kvm_restore_fpu)
.set push
- .set mips64r2
SET_HARDFLOAT
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 95f790663..ad28dac6b 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -22,12 +22,12 @@
#include "interrupt.h"
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
set_bit(priority, &vcpu->arch.pending_exceptions);
}
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
@@ -114,10 +114,10 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause)
+ u32 cause)
{
int allowed = 0;
- uint32_t exccode;
+ u32 exccode;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -196,12 +196,12 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause)
+ u32 cause)
{
return 1;
}
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 214388470..fb118a2c8 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,17 +28,13 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
-extern char __kvm_mips_vcpu_run_end[];
-extern char mips32_exception[], mips32_exceptionEnd[];
-extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
-
#define C_TI (_ULCAST_(1) << 30)
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
@@ -48,7 +44,7 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
+ u32 cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 44da5259f..a6ea084b4 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -9,6 +9,7 @@
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
@@ -147,7 +148,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
/* Put the pages we reserved for the guest pmap */
for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
- kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
}
kfree(kvm->arch.guest_pmap);
@@ -244,10 +245,27 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
}
+static inline void dump_handler(const char *symbol, void *start, void *end)
+{
+ u32 *p;
+
+ pr_debug("LEAF(%s)\n", symbol);
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+
+ for (p = start; p < (u32 *)end; ++p)
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
+
+ pr_debug("\t.set\tpop\n");
+
+ pr_debug("\tEND(%s)\n", symbol);
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
- int err, size, offset;
- void *gebase;
+ int err, size;
+ void *gebase, *p, *handler;
int i;
struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
@@ -273,9 +291,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
else
size = 0x4000;
- /* Save Linux EBASE */
- vcpu->arch.host_ebase = (void *)read_c0_ebase();
-
gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
if (!gebase) {
@@ -285,44 +300,53 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
+ /*
+ * Check new ebase actually fits in CP0_EBase. The lack of a write gate
+ * limits us to the low 512MB of physical address space. If the memory
+ * we allocate is out of range, just give up now.
+ */
+ if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
+ kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+ gebase);
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
/* Save new ebase */
vcpu->arch.guest_ebase = gebase;
- /* Copy L1 Guest Exception handler to correct offset */
+ /* Build guest exception vectors dynamically in unmapped memory */
+ handler = gebase + 0x2000;
/* TLB Refill, EXL = 0 */
- memcpy(gebase, mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase, handler);
/* General Exception Entry point */
- memcpy(gebase + 0x180, mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase + 0x180, handler);
/* For vectored interrupts poke the exception code @ all offsets 0-7 */
for (i = 0; i < 8; i++) {
kvm_debug("L1 Vectored handler @ %p\n",
gebase + 0x200 + (i * VECTORSPACING));
- memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
+ handler);
}
- /* General handler, relocate to unmapped space for sanity's sake */
- offset = 0x2000;
- kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
- gebase + offset,
- mips32_GuestExceptionEnd - mips32_GuestException);
+ /* General exit handler */
+ p = handler;
+ p = kvm_mips_build_exit(p);
- memcpy(gebase + offset, mips32_GuestException,
- mips32_GuestExceptionEnd - mips32_GuestException);
+ /* Guest entry routine */
+ vcpu->arch.vcpu_run = p;
+ p = kvm_mips_build_vcpu_run(p);
-#ifdef MODULE
- offset += mips32_GuestExceptionEnd - mips32_GuestException;
- memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
- __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
- vcpu->arch.vcpu_run = gebase + offset;
-#else
- vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
-#endif
+ /* Dump the generated code */
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+ dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
+ dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
+ dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
@@ -408,17 +432,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_mips_deliver_interrupts(vcpu,
kvm_read_c0_guest_cause(vcpu->arch.cop0));
- __kvm_guest_enter();
+ guest_enter_irqoff();
/* Disable hardware page table walking while in guest */
htw_stop();
+ trace_kvm_enter(vcpu);
r = vcpu->arch.vcpu_run(run, vcpu);
+ trace_kvm_out(vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
- __kvm_guest_exit();
+ guest_exit_irqoff();
local_irq_enable();
if (vcpu->sigset_active)
@@ -507,8 +533,10 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_R30,
KVM_REG_MIPS_R31,
+#ifndef CONFIG_CPU_MIPSR6
KVM_REG_MIPS_HI,
KVM_REG_MIPS_LO,
+#endif
KVM_REG_MIPS_PC,
KVM_REG_MIPS_CP0_INDEX,
@@ -539,6 +567,104 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_COUNT_HZ,
};
+static u64 kvm_mips_get_one_regs_fpu[] = {
+ KVM_REG_MIPS_FCR_IR,
+ KVM_REG_MIPS_FCR_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_msa[] = {
+ KVM_REG_MIPS_MSA_IR,
+ KVM_REG_MIPS_MSA_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_kscratch[] = {
+ KVM_REG_MIPS_CP0_KSCRATCH1,
+ KVM_REG_MIPS_CP0_KSCRATCH2,
+ KVM_REG_MIPS_CP0_KSCRATCH3,
+ KVM_REG_MIPS_CP0_KSCRATCH4,
+ KVM_REG_MIPS_CP0_KSCRATCH5,
+ KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
+static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long ret;
+
+ ret = ARRAY_SIZE(kvm_mips_get_one_regs);
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
+ /* odd doubles */
+ if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
+ ret += 16;
+ }
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
+ ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
+ ret += kvm_mips_callbacks->num_regs(vcpu);
+
+ return ret;
+}
+
+static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+ u64 index;
+ unsigned int i;
+
+ if (copy_to_user(indices, kvm_mips_get_one_regs,
+ sizeof(kvm_mips_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs);
+
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
+ sizeof(kvm_mips_get_one_regs_fpu)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_FPR_32(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+
+ /* skip odd doubles if no F64 */
+ if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+ continue;
+
+ index = KVM_REG_MIPS_FPR_64(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
+ sizeof(kvm_mips_get_one_regs_msa)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_VEC_128(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ for (i = 0; i < 6; ++i) {
+ if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
+ continue;
+
+ if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
+ sizeof(kvm_mips_get_one_regs_kscratch[i])))
+ return -EFAULT;
+ ++indices;
+ }
+
+ return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
+}
+
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -554,12 +680,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
+#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
v = (long)vcpu->arch.hi;
break;
case KVM_REG_MIPS_LO:
v = (long)vcpu->arch.lo;
break;
+#endif
case KVM_REG_MIPS_PC:
v = (long)vcpu->arch.pc;
break;
@@ -688,17 +816,37 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_ERROREPC:
v = (long)kvm_read_c0_guest_errorepc(cop0);
break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ v = (long)kvm_read_c0_guest_kscratch1(cop0);
+ break;
+ case 3:
+ v = (long)kvm_read_c0_guest_kscratch2(cop0);
+ break;
+ case 4:
+ v = (long)kvm_read_c0_guest_kscratch3(cop0);
+ break;
+ case 5:
+ v = (long)kvm_read_c0_guest_kscratch4(cop0);
+ break;
+ case 6:
+ v = (long)kvm_read_c0_guest_kscratch5(cop0);
+ break;
+ case 7:
+ v = (long)kvm_read_c0_guest_kscratch6(cop0);
+ break;
+ }
+ break;
/* registers to be handled specially */
- case KVM_REG_MIPS_CP0_COUNT:
- case KVM_REG_MIPS_COUNT_CTL:
- case KVM_REG_MIPS_COUNT_RESUME:
- case KVM_REG_MIPS_COUNT_HZ:
+ default:
ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
if (ret)
return ret;
break;
- default:
- return -EINVAL;
}
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -755,12 +903,14 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
break;
+#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
vcpu->arch.hi = v;
break;
case KVM_REG_MIPS_LO:
vcpu->arch.lo = v;
break;
+#endif
case KVM_REG_MIPS_PC:
vcpu->arch.pc = v;
break;
@@ -859,22 +1009,34 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_ERROREPC:
kvm_write_c0_guest_errorepc(cop0, v);
break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ kvm_write_c0_guest_kscratch1(cop0, v);
+ break;
+ case 3:
+ kvm_write_c0_guest_kscratch2(cop0, v);
+ break;
+ case 4:
+ kvm_write_c0_guest_kscratch3(cop0, v);
+ break;
+ case 5:
+ kvm_write_c0_guest_kscratch4(cop0, v);
+ break;
+ case 6:
+ kvm_write_c0_guest_kscratch5(cop0, v);
+ break;
+ case 7:
+ kvm_write_c0_guest_kscratch6(cop0, v);
+ break;
+ }
+ break;
/* registers to be handled specially */
- case KVM_REG_MIPS_CP0_COUNT:
- case KVM_REG_MIPS_CP0_COMPARE:
- case KVM_REG_MIPS_CP0_CAUSE:
- case KVM_REG_MIPS_CP0_CONFIG:
- case KVM_REG_MIPS_CP0_CONFIG1:
- case KVM_REG_MIPS_CP0_CONFIG2:
- case KVM_REG_MIPS_CP0_CONFIG3:
- case KVM_REG_MIPS_CP0_CONFIG4:
- case KVM_REG_MIPS_CP0_CONFIG5:
- case KVM_REG_MIPS_COUNT_CTL:
- case KVM_REG_MIPS_COUNT_RESUME:
- case KVM_REG_MIPS_COUNT_HZ:
- return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
default:
- return -EINVAL;
+ return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
}
return 0;
}
@@ -927,23 +1089,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
- u64 __user *reg_dest;
struct kvm_reg_list reg_list;
unsigned n;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
- reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+ reg_list.n = kvm_mips_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
return -EFAULT;
if (n < reg_list.n)
return -E2BIG;
- reg_dest = user_list->reg;
- if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
- sizeof(kvm_mips_get_one_regs)))
- return -EFAULT;
- return 0;
+ return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
}
case KVM_NMI:
/* Treat the NMI as a CPU reset */
@@ -1222,7 +1379,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
static void kvm_mips_set_c0_status(void)
{
- uint32_t status = read_c0_status();
+ u32 status = read_c0_status();
if (cpu_has_dsp)
status |= (ST0_MX);
@@ -1236,9 +1393,9 @@ static void kvm_mips_set_c0_status(void)
*/
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- uint32_t cause = vcpu->arch.host_cp0_cause;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -1260,6 +1417,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
cause, opc, run, vcpu);
+ trace_kvm_exit(vcpu, exccode);
/*
* Do a privilege check, if in UM most of these exit conditions end up
@@ -1279,7 +1437,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
++vcpu->stat.int_exits;
- trace_kvm_exit(vcpu, INT_EXITS);
if (need_resched())
cond_resched();
@@ -1291,7 +1448,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
++vcpu->stat.cop_unusable_exits;
- trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
/* XXXKYMA: Might need to return to user space */
if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
@@ -1300,7 +1456,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EXCCODE_MOD:
++vcpu->stat.tlbmod_exits;
- trace_kvm_exit(vcpu, TLBMOD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
break;
@@ -1310,7 +1465,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
badvaddr);
++vcpu->stat.tlbmiss_st_exits;
- trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
break;
@@ -1319,61 +1473,51 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
cause, opc, badvaddr);
++vcpu->stat.tlbmiss_ld_exits;
- trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
break;
case EXCCODE_ADES:
++vcpu->stat.addrerr_st_exits;
- trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
break;
case EXCCODE_ADEL:
++vcpu->stat.addrerr_ld_exits;
- trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
break;
case EXCCODE_SYS:
++vcpu->stat.syscall_exits;
- trace_kvm_exit(vcpu, SYSCALL_EXITS);
ret = kvm_mips_callbacks->handle_syscall(vcpu);
break;
case EXCCODE_RI:
++vcpu->stat.resvd_inst_exits;
- trace_kvm_exit(vcpu, RESVD_INST_EXITS);
ret = kvm_mips_callbacks->handle_res_inst(vcpu);
break;
case EXCCODE_BP:
++vcpu->stat.break_inst_exits;
- trace_kvm_exit(vcpu, BREAK_INST_EXITS);
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
case EXCCODE_TR:
++vcpu->stat.trap_inst_exits;
- trace_kvm_exit(vcpu, TRAP_INST_EXITS);
ret = kvm_mips_callbacks->handle_trap(vcpu);
break;
case EXCCODE_MSAFPE:
++vcpu->stat.msa_fpe_exits;
- trace_kvm_exit(vcpu, MSA_FPE_EXITS);
ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
break;
case EXCCODE_FPE:
++vcpu->stat.fpe_exits;
- trace_kvm_exit(vcpu, FPE_EXITS);
ret = kvm_mips_callbacks->handle_fpe(vcpu);
break;
case EXCCODE_MSADIS:
++vcpu->stat.msa_disabled_exits;
- trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
break;
@@ -1400,11 +1544,13 @@ skip_emul:
run->exit_reason = KVM_EXIT_INTR;
ret = (-EINTR << 2) | RESUME_HOST;
++vcpu->stat.signal_exits;
- trace_kvm_exit(vcpu, SIGNAL_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
}
}
if (ret == RESUME_GUEST) {
+ trace_kvm_reenter(vcpu);
+
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
* is live), restore FCR31 / MSACSR.
@@ -1450,7 +1596,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
* not to clobber the status register directly via the commpage.
*/
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
@@ -1465,9 +1611,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
enable_fpu_hazard();
/* If guest FPU state not active, restore it now */
- if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+ if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
__kvm_restore_fpu(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+ } else {
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
}
preempt_enable();
@@ -1494,8 +1643,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
* interacts with MSA state, so play it safe and save it first.
*/
if (!(sr & ST0_FR) &&
- (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
- KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+ (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
+ KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
kvm_lose_fpu(vcpu);
change_c0_status(ST0_CU1 | ST0_FR, sr);
@@ -1509,22 +1658,26 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
- switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
- case KVM_MIPS_FPU_FPU:
+ switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
+ case KVM_MIPS_AUX_FPU:
/*
* Guest FPU state already loaded, only restore upper MSA state
*/
__kvm_restore_msa_upper(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
break;
case 0:
 /* Neither FPU nor MSA already active, restore full MSA state */
__kvm_restore_msa(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
if (kvm_mips_guest_has_fpu(&vcpu->arch))
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
+ KVM_TRACE_AUX_FPU_MSA);
break;
default:
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
break;
}
@@ -1536,13 +1689,15 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
disable_msa();
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
}
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
}
preempt_enable();
}
@@ -1558,25 +1713,27 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
*/
preempt_disable();
- if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
__kvm_save_msa(&vcpu->arch);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
/* Disable MSA & FPU */
disable_msa();
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
disable_fpu_hazard();
}
- vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
- } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
+ } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
set_c0_status(ST0_CU1);
enable_fpu_hazard();
__kvm_save_fpu(&vcpu->arch);
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
/* Disable FPU */
clear_c0_status(ST0_CU1 | ST0_FR);
@@ -1638,6 +1795,10 @@ static int __init kvm_mips_init(void)
{
int ret;
+ ret = kvm_mips_entry_setup();
+ if (ret)
+ return ret;
+
ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret)
@@ -1645,18 +1806,6 @@ static int __init kvm_mips_init(void)
register_die_notifier(&kvm_mips_csr_die_notifier);
- /*
- * On MIPS, kernel modules are executed from "mapped space", which
- * requires TLBs. The TLB handling code is statically linked with
- * the rest of the kernel (tlb.c) to avoid the possibility of
- * double faulting. The issue is that the TLB code references
- * routines that are part of the the KVM module, which are only
- * available once the module is loaded.
- */
- kvm_mips_gfn_to_pfn = gfn_to_pfn;
- kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
- kvm_mips_is_error_pfn = is_error_pfn;
-
return 0;
}
@@ -1664,10 +1813,6 @@ static void __exit kvm_mips_exit(void)
{
kvm_exit();
- kvm_mips_gfn_to_pfn = NULL;
- kvm_mips_release_pfn_clean = NULL;
- kvm_mips_is_error_pfn = NULL;
-
unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
new file mode 100644
index 000000000..121008c0f
--- /dev/null
+++ b/arch/mips/kvm/mmu.c
@@ -0,0 +1,395 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS MMU handling in the KVM module.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/highmem.h>
+#include <linux/kvm_host.h>
+#include <asm/mmu_context.h>
+
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_kernel_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
+}
+
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_user_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
+}
+
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+ int srcu_idx, err = 0;
+ kvm_pfn_t pfn;
+
+ if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+ return 0;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ pfn = gfn_to_pfn(kvm, gfn);
+
+ if (is_error_noslot_pfn(pfn)) {
+ kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
+ err = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.guest_pmap[gfn] = pfn;
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long gva)
+{
+ gfn_t gfn;
+ unsigned long offset = gva & ~PAGE_MASK;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+ kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+ __builtin_return_address(0), gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+ gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return KVM_INVALID_ADDR;
+
+ return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ gfn_t gfn;
+ kvm_pfn_t pfn0, pfn1;
+ unsigned long vaddr = 0;
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ const int flush_dcache_mask = 0;
+ int ret;
+
+ if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+ kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+ if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+ vaddr = badvaddr & (PAGE_MASK << 1);
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
+ pfn1 = kvm->arch.guest_pmap[gfn | 0x1];
+
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
+
+ preempt_disable();
+ entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ flush_dcache_mask);
+ preempt_enable();
+
+ return ret;
+}
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ kvm_pfn_t pfn0, pfn1;
+ gfn_t gfn0, gfn1;
+ long tlb_lo[2];
+ int ret;
+
+ tlb_lo[0] = tlb->tlb_lo[0];
+ tlb_lo[1] = tlb->tlb_lo[1];
+
+ /*
+ * The commpage address must not be mapped to anything else if the guest
+ * TLB contains entries nearby, or commpage accesses will break.
+ */
+ if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+ VPN2_MASK & (PAGE_MASK << 1)))
+ tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+ gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+ gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+ if (gfn0 >= kvm->arch.guest_pmap_npages ||
+ gfn1 >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+ __func__, gfn0, gfn1, tlb->tlb_hi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ return -1;
+ }
+
+ if (kvm_mips_map_page(kvm, gfn0) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, gfn1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn0];
+ pfn1 = kvm->arch.guest_pmap[gfn1];
+
+ /* Get attributes from the Guest TLB */
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ (tlb_lo[0] & ENTRYLO_D) |
+ (tlb_lo[0] & ENTRYLO_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ (tlb_lo[1] & ENTRYLO_D) |
+ (tlb_lo[1] & ENTRYLO_V);
+
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo[0], tlb->tlb_lo[1]);
+
+ preempt_disable();
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) :
+ kvm_mips_get_user_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ tlb->tlb_mask);
+ preempt_enable();
+
+ return ret;
+}
+
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned long asid = asid_cache(cpu);
+
+ asid += cpu_asid_inc();
+ if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ kvm_local_flush_tlb_all(); /* start new asid cycle */
+
+ if (!asid) /* fix version if needed */
+ asid = asid_first_version(cpu);
+ }
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+ if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+ hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+ unsigned long flags;
+ int newasid = 0;
+
+ kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+ /* Allocate new kernel and user ASIDs if needed */
+
+ local_irq_save(flags);
+
+ if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+ asid_version_mask(cpu)) {
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+ vcpu->arch.guest_user_asid[cpu] =
+ vcpu->arch.guest_user_mm.context.asid[cpu];
+ newasid++;
+
+ kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+ vcpu->arch.guest_user_asid[cpu]);
+ }
+
+ if (vcpu->arch.last_sched_cpu != cpu) {
+ kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ /*
+ * Migrate the timer interrupt to the current CPU so that it
+ * always interrupts the guest and synchronously triggers a
+ * guest timer interrupt.
+ */
+ kvm_mips_migrate_count(vcpu);
+ }
+
+ if (!newasid) {
+ /*
+ * If we preempted while the guest was executing, then reload
+ * the pre-empted ASID
+ */
+ if (current->flags & PF_VCPU) {
+ write_c0_entryhi(vcpu->arch.
+ preempt_entryhi & asid_mask);
+ ehb();
+ }
+ } else {
+ /* New ASIDs were allocated for the VM */
+
+ /*
+ * Were we in guest context? If so then the pre-empted ASID is
+ * no longer valid, we need to set it to what it should be based
+ * on the mode of the Guest (Kernel/User)
+ */
+ if (current->flags & PF_VCPU) {
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi(vcpu->arch.
+ guest_kernel_asid[cpu] &
+ asid_mask);
+ else
+ write_c0_entryhi(vcpu->arch.
+ guest_user_asid[cpu] &
+ asid_mask);
+ ehb();
+ }
+ }
+
+ /* restore guest state to registers */
+ kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
+ local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ int cpu;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ vcpu->arch.preempt_entryhi = read_c0_entryhi();
+ vcpu->arch.last_sched_cpu = cpu;
+
+ /* save guest state in registers */
+ kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
+ if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+ asid_version_mask(cpu))) {
+ kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
+ cpu_context(cpu, current->mm));
+ drop_mmu_context(current->mm, cpu);
+ }
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+ ehb();
+
+ local_irq_restore(flags);
+}
+
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long paddr, flags, vpn2, asid;
+ unsigned long va = (unsigned long)opc;
+ void *vaddr;
+ u32 inst;
+ int index;
+
+ if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ index = kvm_mips_host_tlb_lookup(vcpu, va);
+ if (index >= 0) {
+ inst = *(opc);
+ } else {
+ vpn2 = va & VPN2_MASK;
+ asid = kvm_read_c0_guest_entryhi(cop0) &
+ KVM_ENTRYHI_ASID;
+ index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
+ if (index < 0) {
+ kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, vcpu, read_c0_entryhi());
+ kvm_mips_dump_host_tlbs();
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.guest_tlb[index])) {
+ kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, index, vcpu,
+ read_c0_entryhi());
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
+ } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
+ vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
+ vaddr += paddr & ~PAGE_MASK;
+ inst = *(u32 *)vaddr;
+ kunmap_atomic(vaddr);
+ } else {
+ kvm_err("%s: illegal address: %p\n", __func__, opc);
+ return KVM_INVALID_INST;
+ }
+
+ return inst;
+}
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index 888bb6707..53f851a61 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -11,27 +11,6 @@
#include <linux/kvm_host.h>
-char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
- "WAIT",
- "CACHE",
- "Signal",
- "Interrupt",
- "COP0/1 Unusable",
- "TLB Mod",
- "TLB Miss (LD)",
- "TLB Miss (ST)",
- "Address Err (ST)",
- "Address Error (LD)",
- "System Call",
- "Reserved Inst",
- "Break Inst",
- "Trap Inst",
- "MSA FPE",
- "FPE",
- "MSA Disabled",
- "D-Cache Flushes",
-};
-
char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
"Index",
"Random",
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 38bfbc906..254377d8e 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -14,7 +14,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>
@@ -24,6 +24,7 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
+#include <asm/tlbdebug.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
@@ -32,22 +33,10 @@
#define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1
-#define PRIx64 "llx"
-
atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);
-/* These function pointers are initialized once the KVM module is loaded */
-kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
-
-void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
-
-bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
-
-uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
@@ -55,7 +44,7 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
cpu_asid_mask(&cpu_data[cpu]);
}
-uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
@@ -63,7 +52,7 @@ uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
cpu_asid_mask(&cpu_data[cpu]);
}
-inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.commpage_tlb;
}
@@ -72,50 +61,15 @@ inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
void kvm_mips_dump_host_tlbs(void)
{
- unsigned long old_entryhi;
- unsigned long old_pagemask;
- struct kvm_mips_tlb tlb;
unsigned long flags;
- int i;
local_irq_save(flags);
- old_entryhi = read_c0_entryhi();
- old_pagemask = read_c0_pagemask();
-
kvm_info("HOST TLBs:\n");
- kvm_info("ASID: %#lx\n", read_c0_entryhi() &
- cpu_asid_mask(&current_cpu_data));
-
- for (i = 0; i < current_cpu_data.tlbsize; i++) {
- write_c0_index(i);
- mtc0_tlbw_hazard();
-
- tlb_read();
- tlbw_use_hazard();
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
- tlb.tlb_hi = read_c0_entryhi();
- tlb.tlb_lo0 = read_c0_entrylo0();
- tlb.tlb_lo1 = read_c0_entrylo1();
- tlb.tlb_mask = read_c0_pagemask();
-
- kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
- i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
- }
- write_c0_entryhi(old_entryhi);
- write_c0_pagemask(old_pagemask);
- mtc0_tlbw_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
@@ -132,74 +86,24 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
tlb = vcpu->arch.guest_tlb[i];
kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
+ ? ' ' : '*',
i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ kvm_info("Lo0=0x%09llx %c%c attr %lx ",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
+ (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
+ kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
+ (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
+ tlb.tlb_mask);
}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
-{
- int srcu_idx, err = 0;
- kvm_pfn_t pfn;
-
- if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
- return 0;
-
- srcu_idx = srcu_read_lock(&kvm->srcu);
- pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
-
- if (is_error_noslot_pfn(pfn)) {
- kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
- err = -EFAULT;
- goto out;
- }
-
- kvm->arch.guest_pmap[gfn] = pfn;
-out:
- srcu_read_unlock(&kvm->srcu, srcu_idx);
- return err;
-}
-
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
- unsigned long gva)
-{
- gfn_t gfn;
- uint32_t offset = gva & ~PAGE_MASK;
- struct kvm *kvm = vcpu->kvm;
-
- if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
- kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
- __builtin_return_address(0), gva);
- return KVM_INVALID_PAGE;
- }
-
- gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
- gva);
- return KVM_INVALID_PAGE;
- }
-
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return KVM_INVALID_ADDR;
-
- return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
-
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
@@ -243,12 +147,12 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
/* Flush D-cache */
if (flush_dcache_mask) {
- if (entrylo0 & MIPS3_PG_V) {
+ if (entrylo0 & ENTRYLO_V) {
++vcpu->stat.flush_dcache_exits;
flush_data_cache_page((entryhi & VPN2_MASK) &
~flush_dcache_mask);
}
- if (entrylo1 & MIPS3_PG_V) {
+ if (entrylo1 & ENTRYLO_V) {
++vcpu->stat.flush_dcache_exits;
flush_data_cache_page(((entryhi & VPN2_MASK) &
~flush_dcache_mask) |
@@ -259,96 +163,35 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
return 0;
}
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu)
-{
- gfn_t gfn;
- kvm_pfn_t pfn0, pfn1;
- unsigned long vaddr = 0;
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- int even;
- struct kvm *kvm = vcpu->kvm;
- const int flush_dcache_mask = 0;
- int ret;
-
- if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
- kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
-
- gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
- gfn, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
- even = !(gfn & 0x1);
- vaddr = badvaddr & (PAGE_MASK << 1);
-
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return -1;
-
- if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
- return -1;
-
- if (even) {
- pfn0 = kvm->arch.guest_pmap[gfn];
- pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
- } else {
- pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
- pfn1 = kvm->arch.guest_pmap[gfn];
- }
-
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
-
- preempt_disable();
- entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
- ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- flush_dcache_mask);
- preempt_enable();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu)
{
- kvm_pfn_t pfn0, pfn1;
+ kvm_pfn_t pfn;
unsigned long flags, old_entryhi = 0, vaddr = 0;
- unsigned long entrylo0 = 0, entrylo1 = 0;
+ unsigned long entrylo[2] = { 0, 0 };
+ unsigned int pair_idx;
- pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
- pfn1 = 0;
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
- entrylo1 = 0;
+ pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
+ pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
+ entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
local_irq_save(flags);
old_entryhi = read_c0_entryhi();
vaddr = badvaddr & (PAGE_MASK << 1);
write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
- mtc0_tlbw_hazard();
- write_c0_entrylo0(entrylo0);
- mtc0_tlbw_hazard();
- write_c0_entrylo1(entrylo1);
- mtc0_tlbw_hazard();
+ write_c0_entrylo0(entrylo[0]);
+ write_c0_entrylo1(entrylo[1]);
write_c0_index(kvm_mips_get_commpage_asid(vcpu));
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
tlbw_use_hazard();
kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
@@ -358,82 +201,12 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long *hpa0,
- unsigned long *hpa1)
-{
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- struct kvm *kvm = vcpu->kvm;
- kvm_pfn_t pfn0, pfn1;
- gfn_t gfn0, gfn1;
- long tlb_lo[2];
- int ret;
-
- tlb_lo[0] = tlb->tlb_lo0;
- tlb_lo[1] = tlb->tlb_lo1;
-
- /*
- * The commpage address must not be mapped to anything else if the guest
- * TLB contains entries nearby, or commpage accesses will break.
- */
- if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
- VPN2_MASK & (PAGE_MASK << 1)))
- tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
-
- gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
- gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
- if (gfn0 >= kvm->arch.guest_pmap_npages ||
- gfn1 >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
- __func__, gfn0, gfn1, tlb->tlb_hi);
- kvm_mips_dump_guest_tlbs(vcpu);
- return -1;
- }
-
- if (kvm_mips_map_page(kvm, gfn0) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, gfn1) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[gfn0];
- pfn1 = kvm->arch.guest_pmap[gfn1];
-
- if (hpa0)
- *hpa0 = pfn0 << PAGE_SHIFT;
-
- if (hpa1)
- *hpa1 = pfn1 << PAGE_SHIFT;
-
- /* Get attributes from the Guest TLB */
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
-
- kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
- tlb->tlb_lo0, tlb->tlb_lo1);
-
- preempt_disable();
- entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
- kvm_mips_get_kernel_asid(vcpu) :
- kvm_mips_get_user_asid(vcpu));
- ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- tlb->tlb_mask);
- preempt_enable();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
-
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
int i;
@@ -449,7 +222,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
}
kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
- __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+ __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
return index;
}
@@ -481,7 +254,6 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
@@ -512,21 +284,16 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
if (idx > 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
-
write_c0_entrylo0(0);
- mtc0_tlbw_hazard();
-
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
+ tlbw_use_hazard();
}
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
@@ -554,61 +321,39 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
/* Blast 'em all away. */
for (entry = 0; entry < maxentry; entry++) {
write_c0_index(entry);
- mtc0_tlbw_hazard();
if (skip_kseg0) {
+ mtc0_tlbr_hazard();
tlb_read();
- tlbw_use_hazard();
+ tlb_read_hazard();
entryhi = read_c0_entryhi();
/* Don't blow away guest kernel entries */
if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
continue;
+
+ write_c0_pagemask(old_pagemask);
}
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
- mtc0_tlbw_hazard();
write_c0_entrylo0(0);
- mtc0_tlbw_hazard();
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
+ tlbw_use_hazard();
}
- tlbw_use_hazard();
-
write_c0_entryhi(old_entryhi);
write_c0_pagemask(old_pagemask);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
- struct kvm_vcpu *vcpu)
-{
- unsigned long asid = asid_cache(cpu);
-
- asid += cpu_asid_inc();
- if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- kvm_local_flush_tlb_all(); /* start new asid cycle */
-
- if (!asid) /* fix version if needed */
- asid = asid_first_version(cpu);
- }
-
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
void kvm_local_flush_tlb_all(void)
{
unsigned long flags;
@@ -628,191 +373,12 @@ void kvm_local_flush_tlb_all(void)
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_write_indexed();
+ tlbw_use_hazard();
entry++;
}
- tlbw_use_hazard();
write_c0_entryhi(old_ctx);
mtc0_tlbw_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
-
-/**
- * kvm_mips_migrate_count() - Migrate timer.
- * @vcpu: Virtual CPU.
- *
- * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
- * if it was running prior to being cancelled.
- *
- * Must be called when the VCPU is migrated to a different CPU to ensure that
- * timer expiry during guest execution interrupts the guest and causes the
- * interrupt to be delivered in a timely manner.
- */
-static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
-{
- if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
- hrtimer_restart(&vcpu->arch.comparecount_timer);
-}
-
-/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
- unsigned long flags;
- int newasid = 0;
-
- kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-
- /* Allocate new kernel and user ASIDs if needed */
-
- local_irq_save(flags);
-
- if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
- asid_version_mask(cpu)) {
- kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
- vcpu->arch.guest_kernel_asid[cpu] =
- vcpu->arch.guest_kernel_mm.context.asid[cpu];
- kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
- vcpu->arch.guest_user_asid[cpu] =
- vcpu->arch.guest_user_mm.context.asid[cpu];
- newasid++;
-
- kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
- cpu_context(cpu, current->mm));
- kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
- cpu, vcpu->arch.guest_kernel_asid[cpu]);
- kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
- vcpu->arch.guest_user_asid[cpu]);
- }
-
- if (vcpu->arch.last_sched_cpu != cpu) {
- kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
- vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
- /*
- * Migrate the timer interrupt to the current CPU so that it
- * always interrupts the guest and synchronously triggers a
- * guest timer interrupt.
- */
- kvm_mips_migrate_count(vcpu);
- }
-
- if (!newasid) {
- /*
- * If we preempted while the guest was executing, then reload
- * the pre-empted ASID
- */
- if (current->flags & PF_VCPU) {
- write_c0_entryhi(vcpu->arch.
- preempt_entryhi & asid_mask);
- ehb();
- }
- } else {
- /* New ASIDs were allocated for the VM */
-
- /*
- * Were we in guest context? If so then the pre-empted ASID is
- * no longer valid, we need to set it to what it should be based
- * on the mode of the Guest (Kernel/User)
- */
- if (current->flags & PF_VCPU) {
- if (KVM_GUEST_KERNEL_MODE(vcpu))
- write_c0_entryhi(vcpu->arch.
- guest_kernel_asid[cpu] &
- asid_mask);
- else
- write_c0_entryhi(vcpu->arch.
- guest_user_asid[cpu] &
- asid_mask);
- ehb();
- }
- }
-
- /* restore guest state to registers */
- kvm_mips_callbacks->vcpu_set_regs(vcpu);
-
- local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
-
-/* ASID can change if another task is scheduled during preemption */
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- uint32_t cpu;
-
- local_irq_save(flags);
-
- cpu = smp_processor_id();
-
- vcpu->arch.preempt_entryhi = read_c0_entryhi();
- vcpu->arch.last_sched_cpu = cpu;
-
- /* save guest state in registers */
- kvm_mips_callbacks->vcpu_get_regs(vcpu);
-
- if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu))) {
- kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
- cpu_context(cpu, current->mm));
- drop_mmu_context(current->mm, cpu);
- }
- write_c0_entryhi(cpu_asid(cpu, current->mm));
- ehb();
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
-
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- unsigned long paddr, flags, vpn2, asid;
- uint32_t inst;
- int index;
-
- if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
- KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
- if (index >= 0) {
- inst = *(opc);
- } else {
- vpn2 = (unsigned long) opc & VPN2_MASK;
- asid = kvm_read_c0_guest_entryhi(cop0) &
- KVM_ENTRYHI_ASID;
- index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
- if (index < 0) {
- kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
- __func__, opc, vcpu, read_c0_entryhi());
- kvm_mips_dump_host_tlbs();
- local_irq_restore(flags);
- return KVM_INVALID_INST;
- }
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.guest_tlb[index],
- NULL, NULL)) {
- kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
- __func__, opc, index, vcpu,
- read_c0_entryhi());
- kvm_mips_dump_guest_tlbs(vcpu);
- local_irq_restore(flags);
- return KVM_INVALID_INST;
- }
- inst = *(opc);
- }
- local_irq_restore(flags);
- } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- paddr =
- kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
- (unsigned long) opc);
- inst = *(uint32_t *) CKSEG0ADDR(paddr);
- } else {
- kvm_err("%s: illegal address: %p\n", __func__, opc);
- return KVM_INVALID_INST;
- }
-
- return inst;
-}
-EXPORT_SYMBOL_GPL(kvm_get_inst);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index bd6437f67..c858cf168 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -17,8 +17,75 @@
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
-/* Tracepoints for VM eists */
-extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+/*
+ * Tracepoints for VM enters
+ */
+DECLARE_EVENT_CLASS(kvm_transition,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ ),
+
+ TP_printk("PC: 0x%08lx",
+ __entry->pc)
+);
+
+DEFINE_EVENT(kvm_transition, kvm_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_reenter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_out,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+/* The first 32 exit reasons correspond to Cause.ExcCode */
+#define KVM_TRACE_EXIT_INT 0
+#define KVM_TRACE_EXIT_TLBMOD 1
+#define KVM_TRACE_EXIT_TLBMISS_LD 2
+#define KVM_TRACE_EXIT_TLBMISS_ST 3
+#define KVM_TRACE_EXIT_ADDRERR_LD 4
+#define KVM_TRACE_EXIT_ADDRERR_ST 5
+#define KVM_TRACE_EXIT_SYSCALL 8
+#define KVM_TRACE_EXIT_BREAK_INST 9
+#define KVM_TRACE_EXIT_RESVD_INST 10
+#define KVM_TRACE_EXIT_COP_UNUSABLE 11
+#define KVM_TRACE_EXIT_TRAP_INST 13
+#define KVM_TRACE_EXIT_MSA_FPE 14
+#define KVM_TRACE_EXIT_FPE 15
+#define KVM_TRACE_EXIT_MSA_DISABLED 21
+/* Further exit reasons */
+#define KVM_TRACE_EXIT_WAIT 32
+#define KVM_TRACE_EXIT_CACHE 33
+#define KVM_TRACE_EXIT_SIGNAL 34
+
+/* Tracepoints for VM exits */
+#define kvm_trace_symbol_exit_types \
+ { KVM_TRACE_EXIT_INT, "Interrupt" }, \
+ { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \
+ { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \
+ { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_ST, "Address Err (ST)" }, \
+ { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \
+ { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \
+ { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \
+ { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \
+ { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \
+ { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
+ { KVM_TRACE_EXIT_FPE, "FPE" }, \
+ { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
+ { KVM_TRACE_EXIT_WAIT, "WAIT" }, \
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_SIGNAL, "Signal" }
TRACE_EVENT(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -34,10 +101,173 @@ TRACE_EVENT(kvm_exit,
),
TP_printk("[%s]PC: 0x%08lx",
- kvm_mips_exit_types_str[__entry->reason],
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
__entry->pc)
);
+#define KVM_TRACE_MFC0 0
+#define KVM_TRACE_MTC0 1
+#define KVM_TRACE_DMFC0 2
+#define KVM_TRACE_DMTC0 3
+#define KVM_TRACE_RDHWR 4
+
+#define KVM_TRACE_HWR_COP0 0
+#define KVM_TRACE_HWR_HWR 1
+
+#define KVM_TRACE_COP0(REG, SEL) ((KVM_TRACE_HWR_COP0 << 8) | \
+ ((REG) << 3) | (SEL))
+#define KVM_TRACE_HWR(REG, SEL) ((KVM_TRACE_HWR_HWR << 8) | \
+ ((REG) << 3) | (SEL))
+
+#define kvm_trace_symbol_hwr_ops \
+ { KVM_TRACE_MFC0, "MFC0" }, \
+ { KVM_TRACE_MTC0, "MTC0" }, \
+ { KVM_TRACE_DMFC0, "DMFC0" }, \
+ { KVM_TRACE_DMTC0, "DMTC0" }, \
+ { KVM_TRACE_RDHWR, "RDHWR" }
+
+#define kvm_trace_symbol_hwr_cop \
+ { KVM_TRACE_HWR_COP0, "COP0" }, \
+ { KVM_TRACE_HWR_HWR, "HWR" }
+
+#define kvm_trace_symbol_hwr_regs \
+ { KVM_TRACE_COP0( 0, 0), "Index" }, \
+ { KVM_TRACE_COP0( 2, 0), "EntryLo0" }, \
+ { KVM_TRACE_COP0( 3, 0), "EntryLo1" }, \
+ { KVM_TRACE_COP0( 4, 0), "Context" }, \
+ { KVM_TRACE_COP0( 4, 2), "UserLocal" }, \
+ { KVM_TRACE_COP0( 5, 0), "PageMask" }, \
+ { KVM_TRACE_COP0( 6, 0), "Wired" }, \
+ { KVM_TRACE_COP0( 7, 0), "HWREna" }, \
+ { KVM_TRACE_COP0( 8, 0), "BadVAddr" }, \
+ { KVM_TRACE_COP0( 9, 0), "Count" }, \
+ { KVM_TRACE_COP0(10, 0), "EntryHi" }, \
+ { KVM_TRACE_COP0(11, 0), "Compare" }, \
+ { KVM_TRACE_COP0(12, 0), "Status" }, \
+ { KVM_TRACE_COP0(12, 1), "IntCtl" }, \
+ { KVM_TRACE_COP0(12, 2), "SRSCtl" }, \
+ { KVM_TRACE_COP0(13, 0), "Cause" }, \
+ { KVM_TRACE_COP0(14, 0), "EPC" }, \
+ { KVM_TRACE_COP0(15, 0), "PRId" }, \
+ { KVM_TRACE_COP0(15, 1), "EBase" }, \
+ { KVM_TRACE_COP0(16, 0), "Config" }, \
+ { KVM_TRACE_COP0(16, 1), "Config1" }, \
+ { KVM_TRACE_COP0(16, 2), "Config2" }, \
+ { KVM_TRACE_COP0(16, 3), "Config3" }, \
+ { KVM_TRACE_COP0(16, 4), "Config4" }, \
+ { KVM_TRACE_COP0(16, 5), "Config5" }, \
+ { KVM_TRACE_COP0(16, 7), "Config7" }, \
+ { KVM_TRACE_COP0(26, 0), "ECC" }, \
+ { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
+ { KVM_TRACE_COP0(31, 2), "KScratch1" }, \
+ { KVM_TRACE_COP0(31, 3), "KScratch2" }, \
+ { KVM_TRACE_COP0(31, 4), "KScratch3" }, \
+ { KVM_TRACE_COP0(31, 5), "KScratch4" }, \
+ { KVM_TRACE_COP0(31, 6), "KScratch5" }, \
+ { KVM_TRACE_COP0(31, 7), "KScratch6" }, \
+ { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \
+ { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \
+ { KVM_TRACE_HWR( 2, 0), "CC" }, \
+ { KVM_TRACE_HWR( 3, 0), "CCRes" }, \
+ { KVM_TRACE_HWR(29, 0), "ULR" }
+
+TRACE_EVENT(kvm_hwr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
+ unsigned long val),
+ TP_ARGS(vcpu, op, reg, val),
+ TP_STRUCT__entry(
+ __field(unsigned long, val)
+ __field(u16, reg)
+ __field(u8, op)
+ ),
+
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->reg = reg;
+ __entry->op = op;
+ ),
+
+ TP_printk("%s %s (%s:%u:%u) 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_hwr_ops),
+ __print_symbolic(__entry->reg,
+ kvm_trace_symbol_hwr_regs),
+ __print_symbolic(__entry->reg >> 8,
+ kvm_trace_symbol_hwr_cop),
+ (__entry->reg >> 3) & 0x1f,
+ __entry->reg & 0x7,
+ __entry->val)
+);
+
+#define KVM_TRACE_AUX_RESTORE 0
+#define KVM_TRACE_AUX_SAVE 1
+#define KVM_TRACE_AUX_ENABLE 2
+#define KVM_TRACE_AUX_DISABLE 3
+#define KVM_TRACE_AUX_DISCARD 4
+
+#define KVM_TRACE_AUX_FPU 1
+#define KVM_TRACE_AUX_MSA 2
+#define KVM_TRACE_AUX_FPU_MSA 3
+
+#define kvm_trace_symbol_aux_op \
+ { KVM_TRACE_AUX_RESTORE, "restore" }, \
+ { KVM_TRACE_AUX_SAVE, "save" }, \
+ { KVM_TRACE_AUX_ENABLE, "enable" }, \
+ { KVM_TRACE_AUX_DISABLE, "disable" }, \
+ { KVM_TRACE_AUX_DISCARD, "discard" }
+
+#define kvm_trace_symbol_aux_state \
+ { KVM_TRACE_AUX_FPU, "FPU" }, \
+ { KVM_TRACE_AUX_MSA, "MSA" }, \
+ { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }
+
+TRACE_EVENT(kvm_aux,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
+ unsigned int state),
+ TP_ARGS(vcpu, op, state),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, op)
+ __field(u8, state)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->op = op;
+ __entry->state = state;
+ ),
+
+ TP_printk("%s %s PC: 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_aux_op),
+ __print_symbolic(__entry->state,
+ kvm_trace_symbol_aux_state),
+ __entry->pc)
+);
+
+TRACE_EVENT(kvm_asid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
+ unsigned int new_asid),
+ TP_ARGS(vcpu, old_asid, new_asid),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, old_asid)
+ __field(u8, new_asid)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->old_asid = old_asid;
+ __entry->new_asid = new_asid;
+ ),
+
+ TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
+ __entry->pc,
+ __entry->old_asid,
+ __entry->new_asid)
+);
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 6ba0fafce..091553942 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -21,7 +21,7 @@
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
gpa_t gpa;
- uint32_t kseg = KSEGX(gva);
+ gva_t kseg = KSEGX(gva);
if ((kseg == CKSEG0) || (kseg == CKSEG1))
gpa = CPHYSADDR(gva);
@@ -40,8 +40,8 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -87,15 +87,15 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
@@ -111,14 +111,14 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
* when we are not using HIGHMEM. Need to address this in a
* HIGHMEM kernel
*/
- kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} else {
- kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
@@ -128,59 +128,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
return ret;
}
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
- && KVM_GUEST_KERNEL_MODE(vcpu)) {
- if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- /*
- * All KSEG0 faults are handled by KVM, as the guest kernel does
- * not expect to ever get them
- */
- if (kvm_mips_handle_kseg0_tlb_fault
- (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else {
- kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -192,8 +145,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
}
} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
- vcpu->arch.pc, badvaddr);
+ kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
/*
* User Address (UA) fault, this could happen if
@@ -213,14 +166,18 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /*
+ * All KSEG0 faults are handled by KVM, as the guest kernel does
+ * not expect to ever get them
+ */
if (kvm_mips_handle_kseg0_tlb_fault
(vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else {
- kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
+ kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -229,12 +186,22 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, true);
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, false);
+}
+
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -251,7 +218,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else {
- kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -262,9 +229,9 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -280,7 +247,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else {
- kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -292,8 +259,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -310,8 +277,8 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -328,8 +295,8 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -346,8 +313,8 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -364,8 +331,8 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -382,8 +349,8 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -407,8 +374,8 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -451,24 +418,41 @@ static int kvm_trap_emul_vm_init(struct kvm *kvm)
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.kscratch_enabled = 0xfc;
+
return 0;
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t config1;
+ u32 config, config1;
int vcpu_id = vcpu->vcpu_id;
/*
* Arch specific stuff, set up config registers properly so that the
- * guest will come up as expected, for now we simulate a MIPS 24kc
+ * guest will come up as expected
*/
+#ifndef CONFIG_CPU_MIPSR6
+ /* r2-r5, simulate a MIPS 24kc */
kvm_write_c0_guest_prid(cop0, 0x00019300);
- /* Have config1, Cacheable, noncoherent, write-back, write allocate */
- kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
- (0x1 << CP0C0_AR) |
- (MMU_TYPE_R4000 << CP0C0_MT));
+#else
+ /* r6+, simulate a generic QEMU machine */
+ kvm_write_c0_guest_prid(cop0, 0x00010000);
+#endif
+ /*
+ * Have config1, Cacheable, noncoherent, write-back, write allocate.
+ * Endianness, arch revision & virtually tagged icache should match
+ * host.
+ */
+ config = read_c0_config() & MIPS_CONF_AR;
+ config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ config |= CONF_BE;
+#endif
+ if (cpu_has_vtag_icache)
+ config |= MIPS_CONF_VI;
+ kvm_write_c0_guest_config(cop0, config);
/* Read the cache characteristics from the host Config1 Register */
config1 = (read_c0_config1() & ~0x7f);
@@ -478,9 +462,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
/* We unset some bits that we aren't emulating */
- config1 &=
- ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
- (1 << CP0C1_WR) | (1 << CP0C1_CA));
+ config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
+ MIPS_CONF1_WR | MIPS_CONF1_CA);
kvm_write_c0_guest_config1(cop0, config1);
/* Have config3, no tertiary/secondary caches implemented */
@@ -511,6 +494,17 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *indices)
+{
+ return 0;
+}
+
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
@@ -660,6 +654,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
.irq_deliver = kvm_mips_irq_deliver_cb,
.irq_clear = kvm_mips_irq_clear_cb,
+ .num_regs = kvm_trap_emul_num_regs,
+ .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
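The vcpu_init hook added above seeds vcpu->arch.kscratch_enabled with 0xfc. Read as a Config4.KScrExist-style bitmask, bit n set appears to mean that CP0 register 31, select n, is usable as a KScratch register, so 0xfc exposes selects 2 through 7 (KScratch1..KScratch6) to the guest. A stand-alone sketch of decoding such a mask (illustrative only, not part of the patch; the helper name is invented):

#include <stdio.h>

/* Interpret a KScrExist-style mask such as 0xfc: bit n set means
 * CP0 register 31, select n, is available as a KScratch register. */
static void dump_kscratch_mask(unsigned int mask)
{
	int sel;

	for (sel = 0; sel < 8; sel++)
		if (mask & (1u << sel))
			printf("CP0 $31 sel %d available\n", sel);
}

int main(void)
{
	dump_kscratch_mask(0xfc);	/* prints selects 2..7 */
	return 0;
}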
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index ff17669e3..8ac0e5994 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -66,7 +66,7 @@ int gic_present;
#endif
static int exin_avail;
-static struct resource ltq_eiu_irq[MAX_EIU];
+static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
@@ -75,7 +75,7 @@ static int ltq_perfcount_irq;
int ltq_eiu_get_irq(int exin)
{
if (exin < exin_avail)
- return ltq_eiu_irq[exin].start;
+ return ltq_eiu_irq[exin];
return -1;
}
@@ -125,8 +125,8 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
int i;
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
int val = 0;
int edge = 0;
@@ -173,8 +173,8 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
int i;
ltq_enable_irq(d);
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* by default we are low level triggered */
ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
/* clear all pending */
@@ -195,8 +195,8 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
int i;
ltq_disable_irq(d);
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -206,7 +206,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
}
static struct irq_chip ltq_irq_type = {
- "icu",
+ .name = "icu",
.irq_enable = ltq_enable_irq,
.irq_disable = ltq_disable_irq,
.irq_unmask = ltq_enable_irq,
@@ -216,7 +216,7 @@ static struct irq_chip ltq_irq_type = {
};
static struct irq_chip ltq_eiu_type = {
- "eiu",
+ .name = "eiu",
.irq_startup = ltq_startup_eiu_irq,
.irq_shutdown = ltq_shutdown_eiu_irq,
.irq_enable = ltq_enable_irq,
@@ -341,10 +341,10 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
return 0;
for (i = 0; i < exin_avail; i++)
- if (hw == ltq_eiu_irq[i].start)
+ if (hw == ltq_eiu_irq[i])
chip = &ltq_eiu_type;
- irq_set_chip_and_handler(hw, chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, chip, handle_level_irq);
return 0;
}
@@ -439,14 +439,15 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
/* find out how many external irq sources we have */
- exin_avail = of_irq_count(eiu_node);
+ exin_avail = of_property_count_u32_elems(eiu_node,
+ "lantiq,eiu-irqs");
if (exin_avail > MAX_EIU)
exin_avail = MAX_EIU;
- ret = of_irq_to_resource_table(eiu_node,
+ ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
ltq_eiu_irq, exin_avail);
- if (ret != exin_avail)
+ if (ret)
panic("failed to load external irq resources");
if (!request_mem_region(res.start, resource_size(&res),
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 5f693ac77..4cbb000e7 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -74,8 +74,8 @@ void __init plat_mem_setup(void)
set_io_port_base((unsigned long) KSEG1);
- if (fw_arg0 == -2) /* UHI interface */
- dtb = (void *)fw_arg1;
+ if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 4ffa6fc81..1a80b6f73 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -10,7 +10,7 @@
#include <dma-coherence.h>
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -41,7 +41,7 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
}
static void loongson_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
@@ -49,7 +49,7 @@ static void loongson_dma_free_coherent(struct device *dev, size_t size,
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
dir, attrs);
@@ -59,9 +59,9 @@ static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
mb();
return r;
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index e59759af6..2fec6f753 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -417,6 +417,7 @@ static int loongson3_cpu_disable(void)
return -EBUSY;
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
local_irq_save(flags);
fixup_irqs();
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d96e912b9..36775d20b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -434,8 +434,8 @@ static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
* a single subroutine should be used across both
* modules.
*/
-static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
- unsigned long *contpc)
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
@@ -627,8 +627,8 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
- case cbcond0_op:
- case cbcond1_op:
+ case pop10_op:
+ case pop30_op:
if (!cpu_has_mips_r6)
break;
if (insn.i_format.rt && !insn.i_format.rs)
@@ -683,14 +683,14 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
- case beqzcjic_op:
+ case pop66_op:
if (!cpu_has_mips_r6)
break;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
- case bnezcjialc_op:
+ case pop76_op:
if (!cpu_has_mips_r6)
break;
if (!insn.i_format.rs)
@@ -784,10 +784,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
- if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32))
+ if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
return 1;
- else if (config_enabled(CONFIG_32BIT) &&
- !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ else if (IS_ENABLED(CONFIG_32BIT) &&
+ !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
return !test_thread_flag(TIF_32BIT_FPREGS);
@@ -1268,7 +1268,7 @@ branch_common:
* instruction in the dslot.
*/
sig = mips_dsemul(xcp, ir,
- contpc);
+ bcpc, contpc);
if (sig < 0)
break;
if (sig)
@@ -1323,7 +1323,7 @@ branch_common:
* Single step the non-cp1
* instruction in the dslot
*/
- sig = mips_dsemul(xcp, ir, contpc);
+ sig = mips_dsemul(xcp, ir, bcpc, contpc);
if (sig < 0)
break;
if (sig)
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 47074887e..4a094f7ac 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -1,3 +1,6 @@
+#include <linux/err.h>
+#include <linux/slab.h>
+
#include <asm/branch.h>
#include <asm/cacheflush.h>
#include <asm/fpu_emulator.h>
@@ -5,43 +8,211 @@
#include <asm/mipsregs.h>
#include <asm/uaccess.h>
-#include "ieee754.h"
-
-/*
- * Emulate the arbitrary instruction ir at xcp->cp0_epc. Required when
- * we have to emulate the instruction in a COP1 branch delay slot. Do
- * not change cp0_epc due to the instruction
+/**
+ * struct emuframe - The 'emulation' frame structure
+ * @emul: The instruction to 'emulate'.
+ * @badinst: A break instruction to cause a return to the kernel.
*
- * According to the spec:
- * 1) it shouldn't be a branch :-)
- * 2) it can be a COP instruction :-(
- * 3) if we are tring to run a protected memory space we must take
- * special care on memory access instructions :-(
- */
-
-/*
- * "Trampoline" return routine to catch exception following
- * execution of delay-slot instruction execution.
+ * This structure defines the frames placed within the delay slot emulation
+ * page in response to a call to mips_dsemul(). Each thread may be allocated
+ * only one frame at any given time. The kernel stores within it the
+ * instruction to be 'emulated' followed by a break instruction, then
+ * executes the frame in user mode. The break causes a trap to the kernel
+ * which leads to do_dsemulret() being called unless the instruction in
+ * @emul causes a trap itself, is a branch, or a signal is delivered to
+ * the thread. In these cases the allocated frame will either be reused by
+ * a subsequent delay slot 'emulation', or be freed during signal delivery or
+ * upon thread exit.
+ *
+ * This approach is used because:
+ *
+ * - Actually emulating all instructions isn't feasible. We would need to
+ * be able to handle instructions from all revisions of the MIPS ISA,
+ * all ASEs & all vendor instruction set extensions. This would be a
+ * whole lot of work & continual maintenance burden as new instructions
+ * are introduced, and in the case of some vendor extensions may not
+ * even be possible. Thus we need to take the approach of actually
+ * executing the instruction.
+ *
+ * - We must execute the instruction within user context. If we were to
+ * execute the instruction in kernel mode then it would have access to
+ * kernel resources without very careful checks, leaving us with a
+ * high potential for security or stability issues to arise.
+ *
+ * - We used to place the frame on the user's stack, but this requires
+ * that the stack be executable. This is bad for security so the
+ * per-process page is now used instead.
+ *
+ * - The instruction in @emul may be something entirely invalid for a
+ * delay slot. The user may (intentionally or otherwise) place a branch
+ * in a delay slot, or a kernel mode instruction, or something else
+ * which generates an exception. Thus we can't rely upon the break in
+ * @badinst always being hit. For this reason we track the index of the
+ * frame allocated to each thread, allowing us to clean it up at later
+ * points such as signal delivery or thread exit.
+ *
+ * - The user may generate a fake struct emuframe if they wish, invoking
+ * the BRK_MEMU break instruction themselves. We must therefore not
+ * trust that BRK_MEMU means there's actually a valid frame allocated
+ * to the thread, and must not allow the user to do anything they
+ * couldn't already.
*/
-
struct emuframe {
mips_instruction emul;
mips_instruction badinst;
- mips_instruction cookie;
- unsigned long epc;
};
-/*
- * Set up an emulation frame for instruction IR, from a delay slot of
- * a branch jumping to CPC. Return 0 if successful, -1 if no emulation
- * required, otherwise a signal number causing a frame setup failure.
- */
-int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
+static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
+
+static inline __user struct emuframe *dsemul_page(void)
+{
+ return (__user struct emuframe *)STACK_TOP;
+}
+
+static int alloc_emuframe(void)
+{
+ mm_context_t *mm_ctx = &current->mm->context;
+ int idx;
+
+retry:
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ /* Ensure we have an allocation bitmap */
+ if (!mm_ctx->bd_emupage_allocmap) {
+ mm_ctx->bd_emupage_allocmap =
+ kcalloc(BITS_TO_LONGS(emupage_frame_count),
+ sizeof(unsigned long),
+ GFP_ATOMIC);
+
+ if (!mm_ctx->bd_emupage_allocmap) {
+ idx = BD_EMUFRAME_NONE;
+ goto out_unlock;
+ }
+ }
+
+ /* Attempt to allocate a single bit/frame */
+ idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count, 0);
+ if (idx < 0) {
+ /*
+ * Failed to allocate a frame. We'll wait until one becomes
+ * available. We unlock the page so that other threads actually
+ * get the opportunity to free their frames, which means
+ * technically the result of bitmap_full may be incorrect.
+ * However the worst case is that we repeat all this and end up
+ * back here again.
+ */
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ if (!wait_event_killable(mm_ctx->bd_emupage_queue,
+ !bitmap_full(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count)))
+ goto retry;
+
+ /* Received a fatal signal - just give in */
+ return BD_EMUFRAME_NONE;
+ }
+
+ /* Success! */
+ pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
+out_unlock:
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ return idx;
+}
+
+static void free_emuframe(int idx, struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ pr_debug("free emuframe %d from %d\n", idx, current->pid);
+ bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
+
+ /* If some thread is waiting for a frame, now's its chance */
+ wake_up(&mm_ctx->bd_emupage_queue);
+
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+}
+
+static bool within_emuframe(struct pt_regs *regs)
+{
+ unsigned long base = (unsigned long)dsemul_page();
+
+ if (regs->cp0_epc < base)
+ return false;
+ if (regs->cp0_epc >= (base + PAGE_SIZE))
+ return false;
+
+ return true;
+}
+
+bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+ int fr_idx;
+
+ /* Clear any allocated frame, retrieving its index */
+ fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
+ /* If no frame was allocated, we're done */
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+
+ task_lock(tsk);
+
+ /* Free the frame that this thread had allocated */
+ if (tsk->mm)
+ free_emuframe(fr_idx, tsk->mm);
+
+ task_unlock(tsk);
+ return true;
+}
+
+bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+ struct emuframe __user *fr;
+ int fr_idx;
+
+ /* Do nothing if we're not executing from a frame */
+ if (!within_emuframe(regs))
+ return false;
+
+ /* Find the frame being executed */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+ fr = &dsemul_page()[fr_idx];
+
+ /*
+ * If the PC is at the emul instruction, roll back to the branch. If
+ * PC is at the badinst (break) instruction, we've already emulated the
+ * instruction so progress to the continue PC. If it's anything else
+ * then something is amiss & the user has branched into some other area
+ * of the emupage - we'll free the allocated frame anyway.
+ */
+ if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
+ regs->cp0_epc = current->thread.bd_emu_branch_pc;
+ else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
+ regs->cp0_epc = current->thread.bd_emu_cont_pc;
+
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ free_emuframe(fr_idx, current->mm);
+ return true;
+}
+
+void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ kfree(mm_ctx->bd_emupage_allocmap);
+}
+
+int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc)
{
int isa16 = get_isa16_mode(regs->cp0_epc);
mips_instruction break_math;
struct emuframe __user *fr;
- int err;
+ int err, fr_idx;
/* NOP is easy */
if (ir == 0)
@@ -68,30 +239,20 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
}
}
- pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc);
+ pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);
- /*
- * The strategy is to push the instruction onto the user stack
- * and put a trap after it which we can catch and jump to
- * the required address any alternative apart from full
- * instruction emulation!!.
- *
- * Algorithmics used a system call instruction, and
- * borrowed that vector. MIPS/Linux version is a bit
- * more heavyweight in the interests of portability and
- * multiprocessor support. For Linux we use a BREAK 514
- * instruction causing a breakpoint exception.
- */
- break_math = BREAK_MATH(isa16);
-
- /* Ensure that the two instructions are in the same cache line */
- fr = (struct emuframe __user *)
- ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
-
- /* Verify that the stack pointer is not completely insane */
- if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
+ /* Allocate a frame if we don't already have one */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ fr_idx = alloc_emuframe();
+ if (fr_idx == BD_EMUFRAME_NONE)
return SIGBUS;
+ fr = &dsemul_page()[fr_idx];
+
+ /* Retrieve the appropriately encoded break instruction */
+ break_math = BREAK_MATH(isa16);
+ /* Write the instructions to the frame */
if (isa16) {
err = __put_user(ir >> 16,
(u16 __user *)(&fr->emul));
@@ -106,84 +267,37 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
err |= __put_user(break_math, &fr->badinst);
}
- err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
- err |= __put_user(cpc, &fr->epc);
-
if (unlikely(err)) {
MIPS_FPU_EMU_INC_STATS(errors);
+ free_emuframe(fr_idx, current->mm);
return SIGBUS;
}
+ /* Record the PC of the branch, PC to continue from & frame index */
+ current->thread.bd_emu_branch_pc = branch_pc;
+ current->thread.bd_emu_cont_pc = cont_pc;
+ atomic_set(&current->thread.bd_emu_frame, fr_idx);
+
+ /* Change user register context to execute the frame */
regs->cp0_epc = (unsigned long)&fr->emul | isa16;
+ /* Ensure the icache observes our newly written frame */
flush_cache_sigtramp((unsigned long)&fr->emul);
return 0;
}
-int do_dsemulret(struct pt_regs *xcp)
+bool do_dsemulret(struct pt_regs *xcp)
{
- int isa16 = get_isa16_mode(xcp->cp0_epc);
- struct emuframe __user *fr;
- unsigned long epc;
- u32 insn, cookie;
- int err = 0;
- u16 instr[2];
-
- fr = (struct emuframe __user *)
- (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
-
- /*
- * If we can't even access the area, something is very wrong, but we'll
- * leave that to the default handling
- */
- if (!access_ok(VERIFY_READ, fr, sizeof(struct emuframe)))
- return 0;
-
- /*
- * Do some sanity checking on the stackframe:
- *
- * - Is the instruction pointed to by the EPC an BREAK_MATH?
- * - Is the following memory word the BD_COOKIE?
- */
- if (isa16) {
- err = __get_user(instr[0],
- (u16 __user *)(&fr->badinst));
- err |= __get_user(instr[1],
- (u16 __user *)((long)(&fr->badinst) + 2));
- insn = (instr[0] << 16) | instr[1];
- } else {
- err = __get_user(insn, &fr->badinst);
- }
- err |= __get_user(cookie, &fr->cookie);
-
- if (unlikely(err ||
- insn != BREAK_MATH(isa16) || cookie != BD_COOKIE)) {
+ /* Cleanup the allocated frame, returning if there wasn't one */
+ if (!dsemul_thread_cleanup(current)) {
MIPS_FPU_EMU_INC_STATS(errors);
- return 0;
- }
-
- /*
- * At this point, we are satisfied that it's a BD emulation trap. Yes,
- * a user might have deliberately put two malformed and useless
- * instructions in a row in his program, in which case he's in for a
- * nasty surprise - the next instruction will be treated as a
- * continuation address! Alas, this seems to be the only way that we
- * can handle signals, recursion, and longjmps() in the context of
- * emulating the branch delay instruction.
- */
-
- pr_debug("dsemulret\n");
-
- if (__get_user(epc, &fr->epc)) { /* Saved EPC */
- /* This is not a good situation to be in */
- force_sig(SIGBUS, current);
-
- return 0;
+ return false;
}
/* Set EPC to return to post-branch instruction */
- xcp->cp0_epc = epc;
+ xcp->cp0_epc = current->thread.bd_emu_cont_pc;
+ pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
MIPS_FPU_EMU_INC_STATS(ds_emul);
- return 1;
+ return true;
}
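With the per-mm emulation page described above, the frame geometry is simple: the slimmed-down struct emuframe is two 32-bit instructions, so with 4 KiB pages the emupage holds 512 frames, and frame idx sits at dsemul_page() + idx * sizeof(struct emuframe). A stand-alone sketch of that arithmetic (illustrative only; the base address and page size here are assumptions, not the kernel's values):

#include <stdint.h>
#include <stdio.h>

struct example_emuframe {
	uint32_t emul;		/* instruction lifted out of the delay slot */
	uint32_t badinst;	/* BREAK_MATH, traps back into the kernel */
};

#define EXAMPLE_PAGE_SIZE	4096UL
#define EXAMPLE_EMUPAGE_BASE	0x7ffff000UL	/* assumed stand-in for STACK_TOP */

int main(void)
{
	unsigned long nframes = EXAMPLE_PAGE_SIZE / sizeof(struct example_emuframe);
	unsigned int idx = 3;

	printf("%lu frames per page\n", nframes);
	printf("frame %u at 0x%lx\n", idx,
	       EXAMPLE_EMUPAGE_BASE + idx * sizeof(struct example_emuframe));
	return 0;
}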
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index ef7f925dd..fa7d8d379 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -40,6 +40,51 @@
#include <asm/mips-cm.h>
/*
+ * Bits describing what cache ops an SMP callback function may perform.
+ *
+ * R4K_HIT - Virtual user or kernel address based cache operations. The
+ * active_mm must be checked before using user addresses, falling
+ * back to kmap.
+ * R4K_INDEX - Index based cache operations.
+ */
+
+#define R4K_HIT BIT(0)
+#define R4K_INDEX BIT(1)
+
+/**
+ * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
+ * @type: Type of cache operations (R4K_HIT or R4K_INDEX).
+ *
+ * Decides whether a cache op needs to be performed on every core in the system.
+ * This may change depending on the @type of cache operation, as well as the set
+ * of online CPUs, so preemption should be disabled by the caller to prevent CPU
+ * hotplug from changing the result.
+ *
+ * Returns: 1 if the cache operation @type should be done on every core in
+ * the system.
+ * 0 if the cache operation @type is globalized and only needs to
+ * be performed on a single CPU.
+ */
+static inline bool r4k_op_needs_ipi(unsigned int type)
+{
+ /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
+ if (type == R4K_HIT && mips_cm_present())
+ return false;
+
+ /*
+ * Hardware doesn't globalize the required cache ops, so SMP calls may
+ * be needed, but only if there are foreign CPUs (non-siblings with
+ * separate caches).
+ */
+ /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ return !cpumask_empty(&cpu_foreign_map[0]);
+#else
+ return false;
+#endif
+}
+
+/*
* Special Variant of smp_call_function for use by cache functions:
*
* o No return value
@@ -48,30 +93,17 @@
* primary cache.
* o doesn't disable interrupts on the local CPU
*/
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
+static inline void r4k_on_each_cpu(unsigned int type,
+ void (*func)(void *info), void *info)
{
preempt_disable();
-
- /*
- * The Coherent Manager propagates address-based cache ops to other
- * cores but not index-based ops. However, r4k_on_each_cpu is used
- * in both cases so there is no easy way to tell what kind of op is
- * executed to the other cores. The best we can probably do is
- * to restrict that call when a CM is not present because both
- * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
- */
- if (!mips_cm_present())
- smp_call_function_many(&cpu_foreign_map, func, info, 1);
+ if (r4k_op_needs_ipi(type))
+ smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
+ func, info, 1);
func(info);
preempt_enable();
}
-#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
-#define cpu_has_safe_index_cacheops 0
-#else
-#define cpu_has_safe_index_cacheops 1
-#endif
-
/*
* Must die.
*/
@@ -462,22 +494,44 @@ static inline void local_r4k___flush_cache_all(void * args)
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}
-static inline int has_valid_asid(const struct mm_struct *mm)
+/**
+ * has_valid_asid() - Determine if an mm already has an ASID.
+ * @mm: Memory map.
+ * @type: R4K_HIT or R4K_INDEX, type of cache op.
+ *
+ * Determines whether @mm already has an ASID on any of the CPUs which cache ops
+ * of type @type within an r4k_on_each_cpu() call will affect. If
+ * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
+ * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
+ * will need to be checked.
+ *
+ * Must be called in non-preemptive context.
+ *
+ * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm.
+ * 0 otherwise.
+ */
+static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
-#ifdef CONFIG_MIPS_MT_SMP
- int i;
+ unsigned int i;
+ const cpumask_t *mask = cpu_present_mask;
- for_each_online_cpu(i)
+ /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ /*
+ * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
+ * each foreign core, so we only need to worry about siblings.
+ * Otherwise we need to worry about all present CPUs.
+ */
+ if (r4k_op_needs_ipi(type))
+ mask = &cpu_sibling_map[smp_processor_id()];
+#endif
+ for_each_cpu(i, mask)
if (cpu_context(i, mm))
return 1;
-
return 0;
-#else
- return cpu_context(smp_processor_id(), mm);
-#endif
}
static void r4k__flush_cache_vmap(void)
@@ -490,12 +544,16 @@ static void r4k__flush_cache_vunmap(void)
r4k_blast_dcache();
}
+/*
+ * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
+ * whole caches when vma is executable.
+ */
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
int exec = vma->vm_flags & VM_EXEC;
- if (!(has_valid_asid(vma->vm_mm)))
+ if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
return;
/*
@@ -516,14 +574,14 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || exec)
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
struct mm_struct *mm = args;
- if (!has_valid_asid(mm))
+ if (!has_valid_asid(mm, R4K_INDEX))
return;
/*
@@ -548,7 +606,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -573,10 +631,10 @@ static inline void local_r4k_flush_cache_page(void *args)
void *vaddr;
/*
- * If ownes no valid ASID yet, cannot possibly have gotten
+ * If owns no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
- if (!has_valid_asid(mm))
+ if (!has_valid_asid(mm, R4K_HIT))
return;
addr &= PAGE_MASK;
@@ -643,7 +701,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -656,18 +714,23 @@ static void r4k_flush_data_cache_page(unsigned long addr)
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
+ (void *) addr);
}
struct flush_icache_range_args {
unsigned long start;
unsigned long end;
+ unsigned int type;
};
-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+static inline void __local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end,
+ unsigned int type)
{
if (!cpu_has_ic_fills_f_dc) {
- if (end - start >= dcache_size) {
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start >= dcache_size)) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -675,7 +738,8 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
}
}
- if (end - start > icache_size)
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start > icache_size))
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
@@ -701,23 +765,52 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
#endif
}
+static inline void local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+}
+
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
struct flush_icache_range_args *fir_args = args;
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
+ unsigned int type = fir_args->type;
- local_r4k_flush_icache_range(start, end);
+ __local_r4k_flush_icache_range(start, end, type);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_icache_range_args args;
+ unsigned long size, cache_size;
args.start = start;
args.end = end;
+ args.type = R4K_HIT | R4K_INDEX;
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
+ /*
+ * Indexed cache ops require an SMP call.
+ * Consider if that can or should be avoided.
+ */
+ preempt_disable();
+ if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
+ /*
+ * If address-based cache ops don't require an SMP call, then
+ * use them exclusively for small flushes.
+ */
+ size = end - start;
+ cache_size = icache_size;
+ if (!cpu_has_ic_fills_f_dc) {
+ size *= 2;
+ cache_size += dcache_size;
+ }
+ if (size <= cache_size)
+ args.type &= ~R4K_INDEX;
+ }
+ r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
+ preempt_enable();
instruction_hazard();
}
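To see how the small-flush heuristic above behaves, take illustrative sizes: a 32 KiB I-cache, a 32 KiB D-cache and a CPU whose I-cache does not fill from the D-cache, on a Coherence Manager system that still has foreign CPUs (so index ops would need an SMP call but hit ops would not). Flushing a 4 KiB range doubles size to 8 KiB and compares it against 32 + 32 = 64 KiB; 8 KiB fits, R4K_INDEX is dropped, and the flush is done purely with address-based ops that the CM globalizes, so no cross-CPU call is made. Flushing a 256 KiB range gives 512 KiB, which exceeds 64 KiB, so the index-based whole-cache blasts, and the SMP call they require, are kept. The cache sizes are examples only; the comparison is exactly the one computed in the hunk above.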
@@ -744,7 +837,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
* subset property so we have to flush the primary caches
* explicitly
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ if (size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -781,7 +874,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
return;
}
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ if (size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -794,25 +887,76 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+struct flush_cache_sigtramp_args {
+ struct mm_struct *mm;
+ struct page *page;
+ unsigned long addr;
+};
+
/*
* While we're protected against bad userland addresses we don't care
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
-static void local_r4k_flush_cache_sigtramp(void * arg)
+static void local_r4k_flush_cache_sigtramp(void *args)
{
+ struct flush_cache_sigtramp_args *fcs_args = args;
+ unsigned long addr = fcs_args->addr;
+ struct page *page = fcs_args->page;
+ struct mm_struct *mm = fcs_args->mm;
+ int map_coherent = 0;
+ void *vaddr;
+
unsigned long ic_lsize = cpu_icache_line_size();
unsigned long dc_lsize = cpu_dcache_line_size();
unsigned long sc_lsize = cpu_scache_line_size();
- unsigned long addr = (unsigned long) arg;
+
+ /*
+ * If owns no valid ASID yet, cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (!has_valid_asid(mm, R4K_HIT))
+ return;
+
+ if (mm == current->active_mm) {
+ vaddr = NULL;
+ } else {
+ /*
+ * Use kmap_coherent or kmap_atomic to do flushes for
+ * another ASID than the current one.
+ */
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapcount(page) &&
+ !Page_dcache_dirty(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, addr);
+ else
+ vaddr = kmap_atomic(page);
+ addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
+ }
R4600_HIT_CACHEOP_WAR_IMPL;
- if (dc_lsize)
- protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store && scache_size)
- protected_writeback_scache_line(addr & ~(sc_lsize - 1));
+ if (!cpu_has_ic_fills_f_dc) {
+ if (dc_lsize)
+ vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
+ : protected_writeback_dcache_line(
+ addr & ~(dc_lsize - 1));
+ if (!cpu_icache_snoops_remote_store && scache_size)
+ vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
+ : protected_writeback_scache_line(
+ addr & ~(sc_lsize - 1));
+ }
if (ic_lsize)
- protected_flush_icache_line(addr & ~(ic_lsize - 1));
+ vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
+ : protected_flush_icache_line(addr & ~(ic_lsize - 1));
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent();
+ else
+ kunmap_atomic(vaddr);
+ }
+
if (MIPS4K_ICACHE_REFILL_WAR) {
__asm__ __volatile__ (
".set push\n\t"
@@ -837,7 +981,23 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
+ struct flush_cache_sigtramp_args args;
+ int npages;
+
+ down_read(&current->mm->mmap_sem);
+
+ npages = get_user_pages_fast(addr, 1, 0, &args.page);
+ if (npages < 1)
+ goto out;
+
+ args.mm = current->mm;
+ args.addr = addr;
+
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
+
+ put_page(args.page);
+out:
+ up_read(&current->mm->mmap_sem);
}
static void r4k_flush_icache_all(void)
@@ -851,6 +1011,15 @@ struct flush_kernel_vmap_range_args {
int size;
};
+static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
+{
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ r4k_blast_dcache();
+}
+
static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
struct flush_kernel_vmap_range_args *vmra = args;
@@ -861,12 +1030,8 @@ static inline void local_r4k_flush_kernel_vmap_range(void *args)
* Aliases only affect the primary caches so don't bother with
* S-caches or T-caches.
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size)
- r4k_blast_dcache();
- else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(vaddr, vaddr + size);
- }
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(vaddr, vaddr + size);
}
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
@@ -876,7 +1041,12 @@ static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
args.vaddr = (unsigned long) vaddr;
args.size = size;
- r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+ if (size >= dcache_size)
+ r4k_on_each_cpu(R4K_INDEX,
+ local_r4k_flush_kernel_vmap_range_index, NULL);
+ else
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
+ &args);
}
static inline void rm7k_erratum31(void)
@@ -1206,7 +1376,7 @@ static void probe_pcache(void)
c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
- if (config & 0x8) /* VI bit */
+ if (config & MIPS_CONF_VI)
c->icache.flags |= MIPS_CACHE_VTAG;
/*
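The sigtramp and icache-range flushes reworked in this file are the kernel half of MIPS instruction-cache coherence; user code that writes instructions at run time carries the matching obligation. A minimal user-space sketch, assuming GCC/Clang's __builtin___clear_cache builtin, which on MIPS/Linux typically reaches these same routines through the cacheflush(2) system call:

#include <string.h>

/* Hypothetical JIT-style helper: copy freshly generated instruction words
 * into an executable buffer, then make them visible to the instruction
 * cache before anything jumps to them. */
static void publish_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	__builtin___clear_cache((char *)dst, (char *)dst + len);
}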
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index cb557d28c..b2eadd6fa 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -131,7 +131,7 @@ static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
struct page *page = NULL;
@@ -141,7 +141,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
* XXX: seems like the coherent and non-coherent implementations could
* be consolidated.
*/
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
gfp = massage_gfp_flags(dev, gfp);
@@ -176,13 +176,13 @@ static void mips_dma_free_noncoherent(struct device *dev, size_t size,
}
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) vaddr;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if (attrs & DMA_ATTR_NON_CONSISTENT) {
mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
return;
}
@@ -200,7 +200,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -214,7 +214,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
pfn = page_to_pfn(virt_to_page((void *)addr));
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -291,7 +291,7 @@ static inline void __dma_sync(struct page *page,
}
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
{
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
@@ -301,7 +301,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
}
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int i;
struct scatterlist *sg;
@@ -322,7 +322,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!plat_device_is_coherent(dev))
__dma_sync(page, offset, size, direction);
@@ -332,7 +332,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 4b88fa031..9560ad731 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -153,7 +153,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9b58eb5fd..72f7478ee 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
{
struct maar_config cfg[BOOT_MEM_MAP_MAX];
unsigned i, num_configured, num_cfg = 0;
- phys_addr_t skip;
for (i = 0; i < boot_mem_map.nr_map; i++) {
switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
continue;
}
- skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+ /* Round lower up */
cfg[num_cfg].lower = boot_mem_map.map[i].addr;
- cfg[num_cfg].lower += skip;
+ cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
- cfg[num_cfg].upper = cfg[num_cfg].lower;
- cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
- cfg[num_cfg].upper -= skip;
+ /* Round upper down */
+ cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size;
+ cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
cfg[num_cfg].attrs = MIPS_MAAR_S;
num_cfg++;
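A worked example of the new rounding, with a made-up boot region: a chunk at 0x90008000 of size 0x00100000 ends at 0x90108000. The lower bound rounds up to the next 64 KiB boundary, (0x90008000 + 0xffff) & ~0xffff = 0x90010000, and the upper bound rounds down, (0x90108000 & ~0xffff) - 1 = 0x900fffff, so the MAAR pair marks only the fully aligned middle of the region as speculatively accessible and the unaligned head and tail fall outside it.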
@@ -441,6 +440,9 @@ static inline void mem_init_free_highmem(void)
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
+ if (cpu_has_dc_aliases)
+ return;
+
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = pfn_to_page(tmp);
@@ -504,7 +506,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
prom_free_prom_memory();
/*
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
index 5eefe3281..01f1154cd 100644
--- a/arch/mips/mm/sc-debugfs.c
+++ b/arch/mips/mm/sc-debugfs.c
@@ -73,8 +73,8 @@ static int __init sc_debugfs_init(void)
file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
NULL, &sc_prefetch_fops);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ if (!file)
+ return -ENOMEM;
return 0;
}
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 9ac1efcfb..78f900c59 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -161,7 +161,7 @@ static void rm7k_tc_disable(void)
local_irq_save(flags);
blast_rm7k_tcache();
clear_c0_config(RM7K_CONF_TE);
- local_irq_save(flags);
+ local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4004b659c..55ce39606 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -888,7 +888,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}
}
if (!did_vmalloc_branch) {
- if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+ if (single_insn_swpd) {
uasm_il_b(p, r, label_vmalloc_done);
uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
} else {
@@ -1025,7 +1025,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
pte_off_odd += offsetof(pte_t, pte_high);
#endif
- if (config_enabled(CONFIG_XPA)) {
+ if (IS_ENABLED(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
@@ -1643,7 +1643,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
unsigned int swmode = mode & ~hwmode;
- if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
uasm_i_lui(p, scratch, swmode >> 16);
uasm_i_or(p, pte, pte, scratch);
BUG_ON(swmode & 0xffff);
@@ -2432,7 +2432,7 @@ static void config_htw_params(void)
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
/* Set pointer size to size of directory pointers */
- if (config_enabled(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
pwsize |= MIPS_PWSIZE_PS_MASK;
/* PTEs may be multiple pointers long (e.g. with XPA) */
pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
@@ -2448,7 +2448,7 @@ static void config_htw_params(void)
* the pwctl fields.
*/
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
- if (config_enabled(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
@@ -2522,7 +2522,7 @@ void build_tlb_refill_handler(void)
*/
static int run_once = 0;
- if (config_enabled(CONFIG_XPA) && !cpu_has_rixi)
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
panic("Kernels supporting XPA currently require CPUs with RIXI");
output_pgtable_bits_defines();
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index d78178dae..277cf52d8 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -53,8 +53,13 @@ static struct insn insn_table_MM[] = {
{ insn_bltzl, 0, 0 },
{ insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+ { insn_cfc1, M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS },
+ { insn_cfcmsa, M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE },
+ { insn_ctc1, M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS },
+ { insn_ctcmsa, M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE },
{ insn_daddu, 0, 0 },
{ insn_daddiu, 0, 0 },
+ { insn_di, M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS },
{ insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
{ insn_dmfc0, 0, 0 },
{ insn_dmtc0, 0, 0 },
@@ -84,6 +89,8 @@ static struct insn insn_table_MM[] = {
{ insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
{ insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_mthi, M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS },
+ { insn_mtlo, M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS },
{ insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
@@ -166,13 +173,15 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS) {
- if (opc == insn_mfc0 || opc == insn_mtc0)
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rt(va_arg(ap, u32));
else
op |= build_rs(va_arg(ap, u32));
}
if (ip->fields & RT) {
- if (opc == insn_mfc0 || opc == insn_mtc0)
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rs(va_arg(ap, u32));
else
op |= build_rt(va_arg(ap, u32));
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 45e3b8799..763d3f1ed 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -67,9 +67,14 @@ static struct insn insn_table[] = {
#else
{ insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
#endif
+ { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD },
+ { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE },
+ { insn_ctc1, M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD },
+ { insn_ctcmsa, M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE },
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+ { insn_di, M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT },
{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
@@ -114,7 +119,13 @@ static struct insn insn_table[] = {
{ insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mthi, M(spec_op, 0, 0, 0, 0, mthi_op), RS },
+ { insn_mtlo, M(spec_op, 0, 0, 0, 0, mtlo_op), RS },
+#ifndef CONFIG_CPU_MIPSR6
{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
+#else
+ { insn_mul, M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
+#endif
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
#ifndef CONFIG_CPU_MIPSR6
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index ad718debc..a82970442 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -49,18 +49,19 @@ enum opcode {
insn_invalid,
insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
- insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
- insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
+ insn_bne, insn_cache, insn_cfc1, insn_cfcmsa, insn_ctc1, insn_ctcmsa,
+ insn_daddiu, insn_daddu, insn_di, insn_dins, insn_dinsm, insn_divu,
+ insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0,
- insn_mthc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe,
- insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sllv, insn_slt,
- insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
- insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
- insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
- insn_lddir, insn_ldpte,
+ insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_or, insn_ori,
+ insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
+ insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
+ insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
+ insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
+ insn_xori, insn_yield, insn_lddir, insn_ldpte,
};
struct insn {
@@ -268,10 +269,15 @@ I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u2s3u1(_cache)
+I_u1u2(_cfc1)
+I_u2u1(_cfcmsa)
+I_u1u2(_ctc1)
+I_u2u1(_ctcmsa)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
+I_u1(_di);
I_u1u2(_divu)
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
@@ -301,6 +307,8 @@ I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
I_u1u2u3(_mthc0)
+I_u1(_mthi)
+I_u1(_mtlo)
I_u3u1u2(_mul)
I_u2u1u3(_ori)
I_u3u1u2(_or)
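The new I_u1(_mthi)/I_u1(_mtlo) definitions generate single-operand emitters with the usual uasm shape, void uasm_i_mthi(u32 **buf, unsigned int reg). A minimal, hypothetical use of them from run-time-generated code (register numbers 26 and 27 are $k0 and $k1):

#include <asm/uasm.h>

/* Hypothetical helper: spill HI/LO to $k0/$k1 around emitted code that
 * clobbers them, then restore with the newly added mthi/mtlo ops. */
static void emit_hilo_save_restore(u32 **p)
{
	uasm_i_mfhi(p, 26);		/* $k0 = HI */
	uasm_i_mflo(p, 27);		/* $k1 = LO */
	/* ... emit the clobbering sequence here ... */
	uasm_i_mthi(p, 26);		/* HI = $k0 */
	uasm_i_mtlo(p, 27);		/* LO = $k1 */
}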
@@ -370,11 +378,7 @@ UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
int ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
-#ifdef CONFIG_64BIT
- return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
-#else
- return 1;
-#endif
+ return addr == (int)addr;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
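The rewritten uasm_in_compat_space_p() replaces the CONFIG_64BIT-conditional range check with a single expression: an address is in compat space exactly when it equals the sign extension of its own low 32 bits, which is trivially true on 32-bit kernels. A standalone demonstration of the predicate, assuming a 64-bit long:

#include <stdio.h>

static int in_compat_space(long addr)
{
	return addr == (int)addr;	/* fits in a sign-extended 32-bit value? */
}

int main(void)
{
	printf("%d\n", in_compat_space(0x12345678L));			/* 1 */
	printf("%d\n", in_compat_space((long)0xffffffff80001000ULL));	/* 1: kseg-style address */
	printf("%d\n", in_compat_space(0x10000000000L));		/* 0: needs 64 bits */
	return 0;
}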
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
index f7133efc5..151f4882e 100644
--- a/arch/mips/mti-malta/malta-dtshim.c
+++ b/arch/mips/mti-malta/malta-dtshim.c
@@ -31,7 +31,7 @@ static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
entries = 1;
mem_array[0] = cpu_to_be32(PHYS_OFFSET);
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
/*
* The current Malta EVA configuration is "special" in that it
* always makes use of addresses in the upper half of the 32 bit
@@ -82,7 +82,7 @@ static void __init append_memory(void *fdt, int root_off)
physical_memsize = 32 << 20;
}
- if (config_enabled(CONFIG_CPU_BIG_ENDIAN)) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/*
* SOC-it swaps, or perhaps doesn't swap, when DMA'ing
* the last word of physical memory.
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index d5f8dae6a..a47556723 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -32,7 +32,7 @@ static void free_init_pages_eva_malta(void *begin, void *end)
void __init fw_meminit(void)
{
- bool eva = config_enabled(CONFIG_EVA);
+ bool eva = IS_ENABLED(CONFIG_EVA);
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
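Several hunks in this commit switch config_enabled() to IS_ENABLED(), the kconfig helper that evaluates to 1 for options set to y or m; for boolean options such as CONFIG_EVA the result is the same, and because it is an ordinary if () rather than #ifdef, both branches keep being compile-checked. A minimal sketch of the pattern, with a made-up return value:

#include <linux/kconfig.h>

/* Hypothetical: choose a memory base without #ifdef; the untaken
 * branch is discarded by the compiler but still parsed and checked. */
static unsigned long foo_mem_base(void)
{
	if (IS_ENABLED(CONFIG_EVA))
		return 0x80000000UL;	/* assumed EVA-style upper-half base */
	return 0UL;
}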
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 33d5ff506..7e7364b05 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -39,6 +39,9 @@
#include <linux/console.h>
#endif
+#define ROCIT_CONFIG_GEN0 0x1f403000
+#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7)
+
extern void malta_be_init(void);
extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
@@ -107,6 +110,8 @@ static void __init fd_activate(void)
static int __init plat_enable_iocoherency(void)
{
int supported = 0;
+ u32 cfg;
+
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void)
} else if (mips_cm_numiocu() != 0) {
/* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n");
- if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+ cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
+ if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
return 0;
}
@@ -261,7 +267,7 @@ void __init plat_mem_setup(void)
fdt = malta_dt_shim(fdt);
__dt_setup_arch(fdt);
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index 9f2f9b2b2..edfcaf066 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -8,7 +8,6 @@
*/
#include <linux/init.h>
#include <linux/libfdt.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
@@ -107,10 +106,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init customize_machine(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-arch_initcall(customize_machine);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 1a8c96035..39e7b472f 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -426,7 +426,7 @@ static inline void emit_load_ptr(unsigned int dst, unsigned int src,
static inline void emit_load_func(unsigned int reg, ptr imm,
struct jit_ctx *ctx)
{
- if (config_enabled(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT)) {
/* At this point imm is always 64-bit */
emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
@@ -516,7 +516,7 @@ static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
static inline u16 align_sp(unsigned int num)
{
/* Double word alignment for 32-bit, quadword for 64-bit */
- unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
num = (num + (align - 1)) & -align;
return num;
}
@@ -1199,7 +1199,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
memset(&ctx, 0, sizeof(ctx));
- ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL);
+ ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
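The JIT's offsets[] array maps each classic-BPF instruction index to the offset of its generated native code, and an index one past the last instruction is also used for the code emitted after the program body, so the allocation needs fp->len + 1 entries, which the kcalloc() change above provides. A tiny userspace sketch of the indexing convention this assumes:

#include <stdlib.h>

/* offsets[i] = native offset of insn i, for i in [0, len]; the extra
 * slot records where the code following the last insn begins. */
static unsigned int *alloc_offsets(size_t len)
{
	return calloc(len + 1, sizeof(unsigned int));
}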
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c
index 3758715d4..0630693be 100644
--- a/arch/mips/netlogic/common/nlm-dma.c
+++ b/arch/mips/netlogic/common/nlm-dma.c
@@ -45,7 +45,7 @@
static char *nlm_swiotlb;
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
@@ -62,7 +62,7 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
}
static void nlm_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
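The nlm-dma.c hunks follow the 4.8 DMA API change in which per-operation attributes became a plain unsigned long bitmask of DMA_ATTR_* flags instead of a struct dma_attrs pointer. A sketch of a caller under the new convention; foo_dev and foo_alloc_wc are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a write-combined DMA buffer; attrs is now just a bitmask. */
static void *foo_alloc_wc(struct device *foo_dev, size_t size,
			  dma_addr_t *handle)
{
	return dma_alloc_attrs(foo_dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}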
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 8bcf7fc40..85f3ee4ab 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
return handled;
}
-static int loongson3_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int loongson3_starting_cpu(unsigned int cpu)
{
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- write_c0_perflo1(reg.control1);
- write_c0_perflo2(reg.control2);
- break;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- break;
- }
-
- return NOTIFY_OK;
+ write_c0_perflo1(reg.control1);
+ write_c0_perflo2(reg.control2);
+ return 0;
}
-static struct notifier_block loongson3_notifier_block = {
- .notifier_call = loongson3_cpu_callback
-};
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ return 0;
+}
static int __init loongson3_init(void)
{
on_each_cpu(reset_counters, NULL, 1);
- register_hotcpu_notifier(&loongson3_notifier_block);
+ cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ "AP_MIPS_OP_LOONGSON3_STARTING",
+ loongson3_starting_cpu, loongson3_dying_cpu);
save_perf_irq = perf_irq;
perf_irq = loongson3_perfcount_handler;
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
static void loongson3_exit(void)
{
on_each_cpu(reset_counters, NULL, 1);
- unregister_hotcpu_notifier(&loongson3_notifier_block);
+ cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
perf_irq = save_perf_irq;
}
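The Loongson-3 oprofile hunk converts a raw CPU notifier to the hotplug state machine: cpuhp_setup_state_nocalls() registers a startup/teardown pair for a named state and does not invoke the startup hook on CPUs that are already online. A sketch of the same registration for a hypothetical per-CPU counter driver, using a dynamically allocated state:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state foo_hp_state;

static int foo_online_cpu(unsigned int cpu)
{
	/* program this CPU's counters here */
	return 0;
}

static int foo_offline_cpu(unsigned int cpu)
{
	/* quiesce this CPU's counters here */
	return 0;
}

static int __init foo_counters_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "foo/counters:online",
					foo_online_cpu, foo_offline_cpu);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;	/* CPUHP_AP_ONLINE_DYN returns the allocated state */
	return 0;
}

static void foo_counters_exit(void)
{
	cpuhp_remove_state_nocalls(foo_hp_state);
}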
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index f1b11f0de..b4c02f296 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -112,7 +112,14 @@ static void pcibios_scanbus(struct pci_controller *hose)
need_domain_info = 1;
}
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
}
@@ -319,6 +326,16 @@ void pcibios_fixup_bus(struct pci_bus *bus)
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ phys_addr_t size = resource_size(rsrc);
+
+ *start = fixup_bigphys_addr(rsrc->start, size);
+ *end = rsrc->start + size;
+}
+
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index 775ff90a9..515997104 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -33,8 +33,8 @@ static ulong get_fdtaddr(void)
{
ulong ftaddr = 0;
- if ((fw_arg0 == -2) && fw_arg1 && !fw_arg2 && !fw_arg3)
- return (ulong)fw_arg1;
+ if (fw_passed_dtb && !fw_arg2 && !fw_arg3)
+ return (ulong)fw_passed_dtb;
if (__dtb_start < __dtb_end)
ftaddr = (ulong)__dtb_start;
@@ -147,8 +147,7 @@ static int __init plat_of_setup(void)
panic("Device tree not present");
pic32_of_prepare_platform_data(pic32_auxdata_lookup);
- if (of_platform_populate(NULL, of_default_bus_match_table,
- pic32_auxdata_lookup, NULL))
+ if (of_platform_default_populate(NULL, pic32_auxdata_lookup, NULL))
panic("Failed to populate DT");
return 0;
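of_platform_default_populate() is shorthand for of_platform_populate() with the default bus match table, which is why the pic32 call shrinks to three arguments; the plat_of_setup() initcalls removed from sead3, pistachio and xilfpga in this commit are dropped in favour of the OF core performing that default population itself. A minimal hypothetical caller:

#include <linux/init.h>
#include <linux/of_platform.h>

/* Hypothetical board hook: create platform devices for the default
 * buses (e.g. "simple-bus") under the device-tree root. */
static int __init foo_publish_devices(void)
{
	return of_platform_default_populate(NULL, NULL, NULL);
}
arch_initcall(foo_publish_devices);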
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index ab7982823..1c91cad79 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/cacheflush.h>
#include <asm/dma-coherence.h>
@@ -60,29 +59,6 @@ const char *get_system_type(void)
return sys_type;
}
-static void __init plat_setup_iocoherency(void)
-{
- /*
- * Kernel has been configured with software coherency
- * but we might choose to turn it off and use hardware
- * coherency instead.
- */
- if (mips_cm_numiocu() != 0) {
- /* Nothing special needs to be done to enable coherency */
- pr_info("CMP IOCU detected\n");
- hw_coherentio = 1;
- if (coherentio == 0)
- pr_info("Hardware DMA cache coherency disabled\n");
- else
- pr_info("Hardware DMA cache coherency enabled\n");
- } else {
- if (coherentio == 1)
- pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
- else
- pr_info("Software DMA cache coherency enabled\n");
- }
-}
-
void __init *plat_get_fdt(void)
{
if (fw_arg0 != -2)
@@ -93,8 +69,6 @@ void __init *plat_get_fdt(void)
void __init plat_mem_setup(void)
{
__dt_setup_arch(plat_get_fdt());
-
- plat_setup_iocoherency();
}
#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
@@ -159,15 +133,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init plat_of_setup(void)
-{
- if (!of_have_populated_dt())
- panic("Device tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(plat_of_setup);
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 3ad0b0794..f24eee04e 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
return 0;
}
-static void __init ralink_systick_init(struct device_node *np)
+static int __init ralink_systick_init(struct device_node *np)
{
+ int ret;
+
systick.membase = of_iomap(np, 0);
if (!systick.membase)
- return;
+ return -ENXIO;
systick_irqaction.name = np->name;
systick.dev.name = np->name;
@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%s: request_irq failed", np->name);
- return;
+ return -EINVAL;
}
- clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
- SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
clockevents_register_device(&systick.dev);
pr_info("%s: running - mult: %d, shift: %d\n",
np->name, systick.dev.mult, systick.dev.shift);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
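ralink_systick_init() now returns int because CLOCKSOURCE_OF_DECLARE init routines are expected to report failure (missing iomap, missing IRQ, clocksource_mmio_init() error) rather than return void, letting the core log and skip a broken timer node. The general shape, with hypothetical device names, register offset and an assumed clock rate:

#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>

#define FOO_TIMER_COUNT	0x4	/* hypothetical counter register offset */

static int __init foo_timer_init(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);

	if (!base)
		return -ENXIO;

	/* 50 MHz rate, rating 300 and 32-bit width are assumptions */
	return clocksource_mmio_init(base + FOO_TIMER_COUNT, np->name,
				     50000000, 300, 32,
				     clocksource_mmio_readl_up);
}
CLOCKSOURCE_OF_DECLARE(foo_timer, "foo,timer", foo_timer_init);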
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index d40edda0c..3c7c9bf57 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -175,7 +175,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
};
static struct rt2880_pmx_func spis_grp_mt7628[] = {
- FUNC("pwm", 3, 14, 4),
+ FUNC("pwm_uart2", 3, 14, 4),
FUNC("util", 2, 14, 4),
FUNC("gpio", 1, 14, 4),
FUNC("spis", 0, 14, 4),
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 063c2dd31..2f45b0357 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -7,7 +7,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
-#include <linux/ds1286.h>
+#include <linux/rtc/ds1286.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index fb4b3520c..7ee14f41f 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -8,7 +8,6 @@
#include <asm/sni.h>
#include <asm/time.h>
-#include <asm-generic/rtc.h>
#define SNI_CLOCK_TICK_RATE 3686400
#define SNI_COUNTER2_DIV 64
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index a77698ff2..1f6bc9a30 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -268,7 +268,7 @@ static int txx9_i8259_irq_setup(int irq)
return err;
}
-static void __init_refok quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __ref quirk_slc90e66_bridge(struct pci_dev *dev)
{
int irq; /* PCI/ISA Bridge interrupt */
u8 reg_64;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 108f8a8d1..ada92db92 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -727,7 +727,7 @@ void __init txx9_iocled_init(unsigned long baseaddr,
int i;
static char *default_triggers[] __initdata = {
"heartbeat",
- "ide-disk",
+ "disk-activity",
"nand-disk",
NULL,
};
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 370304097..8b937300f 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -215,7 +215,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
int i;
static char *default_triggers[] __initdata = {
"heartbeat",
- "ide-disk",
+ "disk-activity",
"nand-disk",
};
diff --git a/arch/mips/xilfpga/init.c b/arch/mips/xilfpga/init.c
index ce2aee216..602e384a2 100644
--- a/arch/mips/xilfpga/init.c
+++ b/arch/mips/xilfpga/init.c
@@ -10,7 +10,6 @@
*/
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/prom.h>
@@ -43,15 +42,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init plat_of_setup(void)
-{
- if (!of_have_populated_dt())
- panic("Device tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(plat_of_setup);