author    André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-01-20 14:01:31 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-01-20 14:01:31 -0300
commit    b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch)
tree      82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /arch/arc
parent    35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff)
Linux-libre 4.4-gnu (tag: pck-4.4-gnu)
Diffstat (limited to 'arch/arc')
-rw-r--r-- arch/arc/Kconfig | 41
-rw-r--r-- arch/arc/Makefile | 4
-rw-r--r-- arch/arc/boot/dts/Makefile | 6
-rw-r--r-- arch/arc/boot/dts/axc001.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/axc003.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/axc003_idu.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/axs10x_mb.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/nsim_hs.dts | 13
-rw-r--r-- arch/arc/boot/dts/skeleton.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/vdk_axc003.dtsi | 2
-rw-r--r-- arch/arc/boot/dts/vdk_axc003_idu.dtsi | 2
-rw-r--r-- arch/arc/configs/axs101_defconfig | 3
-rw-r--r-- arch/arc/configs/axs103_defconfig | 3
-rw-r--r-- arch/arc/configs/axs103_smp_defconfig | 3
-rw-r--r-- arch/arc/configs/nsim_hs_defconfig | 2
-rw-r--r-- arch/arc/configs/nsim_hs_smp_defconfig | 2
-rw-r--r-- arch/arc/configs/nsimosci_hs_defconfig | 2
-rw-r--r-- arch/arc/configs/nsimosci_hs_smp_defconfig | 2
-rw-r--r-- arch/arc/configs/vdk_hs38_defconfig | 2
-rw-r--r-- arch/arc/configs/vdk_hs38_smp_defconfig | 2
-rw-r--r-- arch/arc/include/asm/arcregs.h | 6
-rw-r--r-- arch/arc/include/asm/atomic.h | 8
-rw-r--r-- arch/arc/include/asm/cache.h | 4
-rw-r--r-- arch/arc/include/asm/cacheflush.h | 8
-rw-r--r-- arch/arc/include/asm/entry-compact.h | 13
-rw-r--r-- arch/arc/include/asm/highmem.h | 61
-rw-r--r-- arch/arc/include/asm/hugepage.h | 81
-rw-r--r-- arch/arc/include/asm/irq.h | 1
-rw-r--r-- arch/arc/include/asm/irqflags-arcv2.h | 3
-rw-r--r-- arch/arc/include/asm/irqflags-compact.h | 18
-rw-r--r-- arch/arc/include/asm/kmap_types.h | 18
-rw-r--r-- arch/arc/include/asm/mach_desc.h | 10
-rw-r--r-- arch/arc/include/asm/mcip.h | 3
-rw-r--r-- arch/arc/include/asm/mmu.h | 7
-rw-r--r-- arch/arc/include/asm/page.h | 9
-rw-r--r-- arch/arc/include/asm/pgalloc.h | 12
-rw-r--r-- arch/arc/include/asm/pgtable.h | 88
-rw-r--r-- arch/arc/include/asm/processor.h | 11
-rw-r--r-- arch/arc/include/asm/setup.h | 7
-rw-r--r-- arch/arc/include/asm/smp.h | 7
-rw-r--r-- arch/arc/include/asm/tlbflush.h | 5
-rw-r--r-- arch/arc/include/asm/unwind.h | 4
-rw-r--r-- arch/arc/include/uapi/asm/page.h | 11
-rw-r--r-- arch/arc/kernel/ctx_sw.c | 2
-rw-r--r-- arch/arc/kernel/ctx_sw_asm.S | 3
-rw-r--r-- arch/arc/kernel/entry-arcv2.S | 21
-rw-r--r-- arch/arc/kernel/entry-compact.S | 72
-rw-r--r-- arch/arc/kernel/entry.S | 17
-rw-r--r-- arch/arc/kernel/head.S | 49
-rw-r--r-- arch/arc/kernel/intc-arcv2.c | 15
-rw-r--r-- arch/arc/kernel/intc-compact.c | 90
-rw-r--r-- arch/arc/kernel/irq.c | 45
-rw-r--r-- arch/arc/kernel/mcip.c | 46
-rw-r--r-- arch/arc/kernel/perf_event.c | 32
-rw-r--r-- arch/arc/kernel/process.c | 9
-rw-r--r-- arch/arc/kernel/setup.c | 8
-rw-r--r-- arch/arc/kernel/smp.c | 66
-rw-r--r-- arch/arc/kernel/time.c | 3
-rw-r--r-- arch/arc/kernel/unwind.c | 94
-rw-r--r-- arch/arc/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/arc/lib/memcpy-archs.S | 52
-rw-r--r-- arch/arc/mm/Makefile | 1
-rw-r--r-- arch/arc/mm/cache.c | 91
-rw-r--r-- arch/arc/mm/fault.c | 13
-rw-r--r-- arch/arc/mm/highmem.c | 140
-rw-r--r-- arch/arc/mm/init.c | 106
-rw-r--r-- arch/arc/mm/tlb.c | 238
-rw-r--r-- arch/arc/mm/tlbex.S | 55
-rw-r--r-- arch/arc/plat-axs10x/axs10x.c | 8
-rw-r--r-- arch/arc/plat-sim/platform.c | 5
70 files changed, 1180 insertions(+), 596 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 78c0621d5..6312f6079 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -76,6 +76,10 @@ config STACKTRACE_SUPPORT
config HAVE_LATENCYTOP_SUPPORT
def_bool y
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ def_bool y
+ depends on ARC_MMU_V4
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -190,6 +194,16 @@ config NR_CPUS
range 2 4096
default "4"
+config ARC_SMP_HALT_ON_RESET
+ bool "Enable Halt-on-reset boot mode"
+ default y if ARC_UBOOT_SUPPORT
+ help
+	  In an SMP configuration, cores can be configured as Halt-on-reset
+	  or they can all start at the same time. With Halt-on-reset, the
+	  non-masters are parked until the Master kicks them, so they start
+	  off at the designated entry point. Otherwise, all cores jump to a
+	  common entry point and spin-wait for the Master's signal.
+
endif #SMP
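The spin-wait mode described in that help text is the wake_flag handshake being reworked in the mcip.c hunks at the end of this patch. A minimal C sketch of the exchange (wake_flag is from mcip.c; the two function names here are illustrative):

static volatile int wake_flag;	/* master writes, parked secondaries poll */

/* Master side: kick one secondary (cf. mcip_wakeup_cpu below) */
static void master_kick_cpu(int cpu)
{
	wake_flag = cpu;
}

/* Secondary side: spin until the master names this core
 * (cf. arc_platform_smp_wait_to_boot below) */
static void secondary_wait_to_boot(int cpu)
{
	while (wake_flag != cpu)
		;	/* busy-wait; secondary then jumps to its entry point */
}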
menuconfig ARC_CACHE
@@ -278,6 +292,8 @@ choice
default ARC_MMU_V2 if ARC_CPU_750D
default ARC_MMU_V4 if ARC_CPU_HS
+if ISA_ARCOMPACT
+
config ARC_MMU_V1
bool "MMU v1"
help
@@ -297,6 +313,8 @@ config ARC_MMU_V3
Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
Shared Address Spaces (SASID)
+endif
+
config ARC_MMU_V4
bool "MMU v4"
depends on ISA_ARCV2
@@ -427,6 +445,29 @@ config LINUX_LINK_BASE
However some customers have peripherals mapped at this addr, so
Linux needs to be scooted a bit.
If you don't know what the above means, leave this setting alone.
+	  This needs to match the memory start address specified in the Device Tree.
+
+config HIGHMEM
+ bool "High Memory Support"
+ help
+	  With the ARC 2G:2G address split, only the upper 2G is directly
+	  addressable by the kernel. Enable this to potentially allow access
+	  to the rest of the 2G, and to PAE in the future.
+
+config ARC_HAS_PAE40
+ bool "Support for the 40-bit Physical Address Extension"
+ default n
+ depends on ISA_ARCV2
+ select HIGHMEM
+ help
+ Enable access to physical memory beyond 4G, only supported on
+	  ARC cores with 40-bit Physical Addressing support.
+
+config ARCH_PHYS_ADDR_T_64BIT
+ def_bool ARC_HAS_PAE40
+
+config ARCH_DMA_ADDR_T_64BIT
+ bool
config ARC_CURR_IN_REG
bool "Dedicate Register r25 for current_task pointer"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 8a27a4830..aeb190210 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -81,7 +81,7 @@ endif
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
# Finally dump everything into the kernel build system
KBUILD_CFLAGS += $(cflags-y)
@@ -121,7 +121,7 @@ $(boot_targets): vmlinux
$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
dtbs: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+ $(Q)$(MAKE) $(build)=$(boot)/dts
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index b0e3f19bb..a09f11b71 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -6,10 +6,12 @@ ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
endif
obj-y += $(builtindtb-y).dtb.o
-targets += $(builtindtb-y).dtb
+dtb-y := $(builtindtb-y).dtb
.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
-dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
+dtstree := $(srctree)/$(src)
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+always := $(dtb-y)
clean-files := *.dtb *.dtb.S
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index a5e2726a0..420dcfde2 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -95,6 +95,6 @@
#size-cells = <1>;
ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x00000000 0x20000000>; /* 512MiB */
+ reg = <0x80000000 0x20000000>; /* 512MiB */
};
};
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 846481f37..f90fadf7f 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -98,6 +98,6 @@
#size-cells = <1>;
ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x00000000 0x20000000>; /* 512MiB */
+ reg = <0x80000000 0x20000000>; /* 512MiB */
};
};
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 2f0b33257..06a9f294a 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -121,6 +121,6 @@
#size-cells = <1>;
ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x00000000 0x20000000>; /* 512MiB */
+ reg = <0x80000000 0x20000000>; /* 512MiB */
};
};
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index f3db32154..44a578c10 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -46,6 +46,7 @@
snps,pbl = < 32 >;
clocks = <&apbclk>;
clock-names = "stmmaceth";
+ max-speed = <100>;
};
ehci@0x40000 {
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index 911f069e0..fc81879bc 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -11,8 +11,17 @@
/ {
compatible = "snps,nsim_hs";
+ #address-cells = <2>;
+ #size-cells = <2>;
interrupt-parent = <&core_intc>;
+ memory {
+ device_type = "memory";
+ /* CONFIG_LINUX_LINK_BASE needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
+ 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
+ };
+
chosen {
bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
};
@@ -26,8 +35,8 @@
#address-cells = <1>;
#size-cells = <1>;
- /* child and parent address space 1:1 mapped */
- ranges;
+	/* only peripheral space at the end of low mem is accessible */
+ ranges = <0x80000000 0x0 0x80000000 0x80000000>;
core_intc: core-interrupt-controller {
compatible = "snps,archs-intc";
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index a870bdd5e..296d371a3 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -32,6 +32,6 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x10000000>; /* 256M */
+ reg = <0x80000000 0x10000000>; /* 256M */
};
};
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index 9393fd902..84226bd48 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -56,6 +56,6 @@
#size-cells = <1>;
ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x00000000 0x20000000>; /* 512MiB */
+ reg = <0x80000000 0x20000000>; /* 512MiB */
};
};
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index 9bee8ed09..31f0fb5fc 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -71,6 +71,6 @@
#size-cells = <1>;
ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x00000000 0x20000000>; /* 512MiB */
+ reg = <0x80000000 0x20000000>; /* 512MiB */
};
};
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 562dac6a7..f1ac9818b 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -89,7 +89,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 83a6d8d5c..323486d6e 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -95,7 +95,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index f1e1c84e0..66191cd04 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -96,7 +96,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f761a7c70..f68838e80 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index dc6f74f41..96bd1c20f 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 3fef0a210..fcae66683 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 51784837d..b01b65916 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index ef35ef392..a07f20de2 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 634509e5e..f36c047b3 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index d8023bc8d..7fac7d85e 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -120,7 +120,7 @@
/* gcc builtin sr needs reg param to be long immediate */
#define write_aux_reg(reg_immed, val) \
- __builtin_arc_sr((unsigned int)val, reg_immed)
+ __builtin_arc_sr((unsigned int)(val), reg_immed)
#else
@@ -327,8 +327,8 @@ struct bcr_generic {
*/
struct cpuinfo_arc_mmu {
- unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6;
- unsigned int num_tlb:16, sets:12, ways:4;
+ unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
+ unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
};
struct cpuinfo_arc_cache {
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index c3ecda023..7730d302c 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,11 +17,11 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_ARC_STAR_9000923308
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else
@@ -125,7 +125,7 @@ static inline void atomic_set(atomic_t *v, int i)
unsigned long flags;
atomic_ops_lock(flags);
- v->counter = i;
+ WRITE_ONCE(v->counter, i);
atomic_ops_unlock(flags);
}
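The atomic.h hunks funnel atomic_read()/atomic_set() through READ_ONCE()/WRITE_ONCE(), which forces a single, untorn access of the full word. A hedged usage sketch (the flag/poll code is illustrative, not from this patch):

static atomic_t ready;

static void announce(void)
{
	atomic_set(&ready, 1);		/* now a WRITE_ONCE() underneath */
}

static int poll_ready(void)
{
	return atomic_read(&ready);	/* now a READ_ONCE() underneath */
}

/* Without the *_ONCE() wrappers, the compiler could legally hoist the
 * read out of a polling loop and spin forever on a stale register copy. */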
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index e23ea6e76..210ef3e72 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -62,9 +62,8 @@ extern int ioc_exists;
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
#define ARC_REG_IC_IVIL 0x19
-#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
#define ARC_REG_IC_PTAG 0x1E
-#endif
+#define ARC_REG_IC_PTAG_HI 0x1F
/* Bit val in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE 0x1
@@ -77,6 +76,7 @@ extern int ioc_exists;
#define ARC_REG_DC_FLSH 0x4B
#define ARC_REG_DC_FLDL 0x4C
#define ARC_REG_DC_PTAG 0x5C
+#define ARC_REG_DC_PTAG_HI 0x5F
/* Bit val in DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH 0x40
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 0992d3dbc..fbe3587c4 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -31,10 +31,10 @@
void flush_cache_all(void);
-void flush_icache_range(unsigned long start, unsigned long end);
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 415443c2a..1aff3be91 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -110,13 +110,12 @@
.macro FAKE_RET_FROM_EXCPN
- ld r9, [sp, PT_status32]
- bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK)
- bset r9, r9, STATUS_L_BIT
- sr r9, [erstatus]
- mov r9, 55f
- sr r9, [eret]
-
+ lr r9, [status32]
+ bclr r9, r9, STATUS_AE_BIT
+ or r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
+ sr r9, [erstatus]
+ mov r9, 55f
+ sr r9, [eret]
rtie
55:
.endm
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644
index 000000000..b1585c963
--- /dev/null
+++ b/arch/arc/include/asm/highmem.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+
+#include <uapi/asm/page.h>
+#include <asm/kmap_types.h>
+
+/* start after vmalloc area */
+#define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+#define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
+#define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
+
+/* start after fixmap area */
+#define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
+#define PKMAP_SIZE PGDIR_SIZE
+#define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+
+#define kmap_prot PAGE_KERNEL
+
+
+#include <asm/cacheflush.h>
+
+extern void *kmap(struct page *page);
+extern void *kmap_high(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+static inline void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+
+#endif
+
+#endif
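The new header carves the fixmap and pkmap windows (one PGDIR_SIZE each) out of the space just below PAGE_OFFSET. A minimal usage sketch of the API it exposes, assuming the usual kernel kmap()/kunmap() semantics (illustrative, not part of the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero a page that may live in high memory: lowmem pages are permanently
 * mapped, highmem pages get a temporary pkmap slot via kmap_high(). */
static void zero_any_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep; use kmap_atomic() in atomic ctx */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* no-op unless PageHighMem(page) */
}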
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644
index 000000000..c5094de86
--- /dev/null
+++ b/arch/arc/include/asm/hugepage.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef _ASM_ARC_HUGEPAGE_H
+#define _ASM_ARC_HUGEPAGE_H
+
+#include <linux/types.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
+
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
+#define pmd_special(pmd) pte_special(pmd_pte(pmd))
+
+#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
+
+#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
+#define pmd_trans_splitting(pmd) (pmd_trans_huge(pmd) && pmd_special(pmd))
+
+#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ /*
+ * open-coded pte_modify() with additional retaining of HW_SZ bit
+ * so that pmd_trans_huge() remains true for this PMD
+ */
+ return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd);
+
+#define has_transparent_hugepage() 1
+
+/* Generic variants assume pgtable_t is struct page *, hence need for these */
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+
+#endif
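Every pmd_* helper above follows the same three-step pattern: reinterpret the PMD as a PTE (identical bit layout on ARC), apply the existing pte_* helper, and cast back. Expanding one wrapper by hand makes this visible (illustrative expansion, not code from the patch):

static inline pmd_t expanded_pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = pmd_pte(pmd);	/* same bits, different type */

	pte = pte_mkdirty(pte);		/* sets _PAGE_DIRTY */
	return pte_pmd(pte);		/* cast back; _PAGE_HW_SZ untouched */
}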
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index bc5103637..4fd7d62a6 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,6 +16,7 @@
#ifdef CONFIG_ISA_ARCOMPACT
#define TIMER0_IRQ 3
#define TIMER1_IRQ 4
+#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for UP hardware */
#else
#define TIMER0_IRQ 16
#define TIMER1_IRQ 17
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index ad481c240..258b0e5ad 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -37,6 +37,9 @@
#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
(ARCV2_IRQ_DEF_PRIO << 1))
+/* SLEEP needs default irq priority (<=) which can interrupt the doze */
+#define ISA_SLEEP_ARG (0x10 | ARCV2_IRQ_DEF_PRIO)
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index aa805575c..c1d36458b 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -23,11 +23,13 @@
#define STATUS_E2_BIT 2 /* Int 2 enable */
#define STATUS_A1_BIT 3 /* Int 1 active */
#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_IE_MASK (STATUS_E1_MASK | STATUS_E2_MASK)
/* Other Interrupt Handling related Aux regs */
@@ -41,6 +43,8 @@
#define ISA_INIT_STATUS_BITS STATUS_IE_MASK
+#define ISA_SLEEP_ARG 0x3
+
#ifndef __ASSEMBLY__
/******************************************************************
@@ -91,7 +95,19 @@ static inline void arch_local_irq_restore(unsigned long flags)
/*
* Unconditionally Enable IRQs
*/
-extern void arch_local_irq_enable(void);
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " or %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc", "memory");
+}
+
/*
* Unconditionally Disable IRQs
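The extern arch_local_irq_enable() is replaced above by an inline that unconditionally sets both interrupt-enable bits. Its effect matches composing the two helpers already defined in this header; a C-level equivalent for illustration:

static inline void arch_local_irq_enable_equiv(void)
{
	unsigned long flags = arch_local_save_flags();	/* read STATUS32 */

	flags |= STATUS_E1_MASK | STATUS_E2_MASK;	/* enable L1 + L2 */
	arch_local_irq_restore(flags);			/* FLAG it back */
}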
diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
new file mode 100644
index 000000000..f0d7f6ace
--- /dev/null
+++ b/arch/arc/include/asm/kmap_types.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+/*
+ * We primarily need to define KM_TYPE_NR here, but that in turn
+ * is a function of PGDIR_SIZE etc.
+ * To avoid a circular dependency, everything lives in asm/highmem.h
+ */
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index e8993a2be..c28e6c347 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,11 +23,8 @@
* @dt_compat: Array of device tree 'compatible' strings
* (XXX: although only 1st entry is looked at)
* @init_early: Very early callback [called from setup_arch()]
- * @init_irq: setup external IRQ controllers [called from init_IRQ()]
- * @init_smp: for each CPU (e.g. setup IPI)
+ * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
* [(M):init_IRQ(), (o):start_kernel_secondary()]
- * @init_time: platform specific clocksource/clockevent registration
- * [called from time_init()]
* @init_machine: arch initcall level callback (e.g. populate static
* platform devices or parse Devicetree)
* @init_late: Late initcall level callback
@@ -36,13 +33,10 @@
struct machine_desc {
const char *name;
const char **dt_compat;
-
void (*init_early)(void);
- void (*init_irq)(void);
#ifdef CONFIG_SMP
- void (*init_smp)(unsigned int);
+ void (*init_per_cpu)(unsigned int);
#endif
- void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 52c11f0bb..46f4e5351 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -86,9 +86,6 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
__mcip_cmd(cmd, param);
}
-extern void mcip_init_early_smp(void);
-extern void mcip_init_smp(unsigned int cpu);
-
#endif
#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 0f9c3eb53..b144d7ca7 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -24,6 +24,7 @@
#if (CONFIG_ARC_MMU_VER < 4)
#define ARC_REG_TLBPD0 0x405
#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBPD1HI 0 /* Dummy: allows code sharing with ARC700 */
#define ARC_REG_TLBINDEX 0x407
#define ARC_REG_TLBCOMMAND 0x408
#define ARC_REG_PID 0x409
@@ -31,6 +32,7 @@
#else
#define ARC_REG_TLBPD0 0x460
#define ARC_REG_TLBPD1 0x461
+#define ARC_REG_TLBPD1HI 0x463
#define ARC_REG_TLBINDEX 0x464
#define ARC_REG_TLBCOMMAND 0x465
#define ARC_REG_PID 0x468
@@ -83,6 +85,11 @@ void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
void read_decode_mmu_bcr(void);
+static inline int is_pae40_enabled(void)
+{
+ return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+}
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 9c8aa41e4..429957f1c 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -43,7 +43,6 @@ typedef struct {
typedef struct {
unsigned long pgprot;
} pgprot_t;
-typedef unsigned long pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
@@ -57,20 +56,26 @@ typedef unsigned long pgtable_t;
#else /* !STRICT_MM_TYPECHECKS */
+#ifdef CONFIG_ARC_HAS_PAE40
+typedef unsigned long long pte_t;
+#else
typedef unsigned long pte_t;
+#endif
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
-typedef unsigned long pgtable_t;
#define pte_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
+#define __pgd(x) (x)
#define __pgprot(x) (x)
#define pte_pgprot(x) (x)
#endif
+typedef pte_t * pgtable_t;
+
#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 81208bfd9..86ed67128 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -49,7 +49,7 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
static inline int __get_order_pgd(void)
{
- return get_order(PTRS_PER_PGD * 4);
+ return get_order(PTRS_PER_PGD * sizeof(pgd_t));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -87,7 +87,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline int __get_order_pte(void)
{
- return get_order(PTRS_PER_PTE * 4);
+ return get_order(PTRS_PER_PTE * sizeof(pte_t));
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,10 +107,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_pg;
struct page *page;
- pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
if (!pte_pg)
return 0;
- memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
page = virt_to_page(pte_pg);
if (!pgtable_page_ctor(page)) {
__free_page(page);
@@ -128,12 +128,12 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
pgtable_page_dtor(virt_to_page(ptep));
- free_pages(ptep, __get_order_pte());
+ free_pages((unsigned long)ptep, __get_order_pte());
}
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#define check_pgt_cache() do { } while (0)
-#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 128171880..57af2f05a 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -38,6 +38,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>
+#include <linux/const.h>
/**************************************************************************
* Page Table Flags
@@ -60,7 +61,8 @@
#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
-#define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */
+#define _PAGE_DIRTY (1<<6) /* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL (1<<7)
#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
#define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */
@@ -71,7 +73,8 @@
#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
#define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */
-#define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */
+#define _PAGE_DIRTY (1<<5) /* Page modified (dirty) (S) */
+#define _PAGE_SPECIAL (1<<6)
#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU (1<<7) /* Page cache mode write-thru (H) */
@@ -81,32 +84,33 @@
#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
#if (CONFIG_ARC_MMU_VER >= 4)
-#define _PAGE_SZ (1<<10) /* Page Size indicator (H) */
+#define _PAGE_HW_SZ (1<<10) /* Page Size indicator (H): 0 normal, 1 super */
#endif
#define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr
usable for shared TLB entries (H) */
+
+#define _PAGE_UNUSED_BIT (1<<12)
#endif
/* vmalloc permissions */
#define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
_PAGE_GLOBAL | _PAGE_PRESENT)
-#ifdef CONFIG_ARC_CACHE_PAGES
-#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
-#else
-#define _PAGE_DEF_CACHEABLE (0)
+#ifndef CONFIG_ARC_CACHE_PAGES
+#undef _PAGE_CACHEABLE
+#define _PAGE_CACHEABLE 0
#endif
-/* Helper for every "user" page
- * -kernel can R/W/X
- * -by default cached, unless config otherwise
- * -present in memory
- */
-#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+#ifndef _PAGE_HW_SZ
+#define _PAGE_HW_SZ 0
+#endif
+
+/* Defaults for every user page */
+#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/* More abbreviated helpers */
#define PAGE_U_NONE __pgprot(___DEF)
@@ -122,15 +126,20 @@
* user vaddr space - visible in all addr spaces, but kernel mode only
* Thus Global, all-kernel-access, no-user-access, cached
*/
-#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)
/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
/* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
+#endif
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -191,26 +200,22 @@
/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE 8
+#define BITS_FOR_PTE 8 /* 11:8:13 */
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE 8
+#define BITS_FOR_PTE 8 /* 10:8:14 */
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE 9
+#define BITS_FOR_PTE 9 /* 11:9:12 */
#endif
#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
-#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SHIFT (32 - BITS_FOR_PGD)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#ifdef __ASSEMBLY__
-#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
-#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
-#else
-#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
-#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
-#endif
+#define PTRS_PER_PTE _BITUL(BITS_FOR_PTE)
+#define PTRS_PER_PGD _BITUL(BITS_FOR_PGD)
+
/*
* Number of entries a user land program use.
* TASK_SIZE is the maximum vaddr that can be used by a userland program.
@@ -270,15 +275,10 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
PAGE_SHIFT)))
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
- pte; \
-})
-
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
+ pgprot_val(prot)))
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/*
@@ -295,23 +295,26 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
/* Zoo of pte_xxx function */
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
-#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
-#define pte_special(pte) (0)
+#define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL)
#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
-PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
-PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
+PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define __HAVE_ARCH_PTE_SPECIAL
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -357,7 +360,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
#endif
-extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep);
@@ -383,6 +385,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <asm/hugepage.h>
+#endif
+
#include <asm-generic/pgtable.h>
/* to cope with aliasing VIPT cache */
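The BITS_FOR_PTE comments added above encode the PGD:PTE:page split. Working through the 8K-page case as a check (pure arithmetic from the macros shown):

/* CONFIG_ARC_PAGE_SIZE_8K, i.e. the "11:8:13" comment:
 *   PAGE_SHIFT   = 13                  -> 8 KB pages
 *   BITS_FOR_PTE = 8                   -> PTRS_PER_PTE = 256
 *   BITS_FOR_PGD = 32 - 8 - 13 = 11    -> PTRS_PER_PGD = 2048
 *   PGDIR_SHIFT  = 32 - 11 = 21
 *   PGDIR_SIZE   = 1 << 21 = 2 MB of virtual space per PGD entry
 */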
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index ee682d8e0..1d694c1ef 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -57,11 +57,7 @@ struct task_struct;
* A lot of busy-wait loops in SMP are based on non-volatile data which would
* otherwise get optimised away by gcc
*/
-#ifdef CONFIG_SMP
#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
-#else
-#define cpu_relax() do { } while (0)
-#endif
#define cpu_relax_lowlatency() cpu_relax()
@@ -114,7 +110,12 @@ extern unsigned int get_wchan(struct task_struct *p);
* -----------------------------------------------------------------------------
*/
#define VMALLOC_START 0x70000000
-#define VMALLOC_SIZE (PAGE_OFFSET - VMALLOC_START)
+
+/*
+ * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
+ * See asm/highmem.h for details
+ */
+#define VMALLOC_SIZE (PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
#define USER_KERNEL_GUTTER 0x10000000
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 6e3ef5ba4..307846691 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -33,4 +33,11 @@ extern int root_mountflags, end_mem;
void setup_processor(void);
void __init setup_arch_memory(void);
+/* Helpers used in arc_*_mumbojumbo routines */
+#define IS_AVAIL1(v, s) ((v) ? s : "")
+#define IS_DISABLED_RUN(v) ((v) ? "" : "(disabled) ")
+#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+
#endif /* __ASMARC_SETUP_H */
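These helpers feed printf-style boot banners: IS_AVAIL2() expands to two comma-separated arguments, one for hardware presence and one for whether the Kconfig option actually uses it. A hedged usage sketch (the function is illustrative, loosely modeled on the arc_*_mumbojumbo printers; mmu->pae is the new field from the arcregs.h hunk above):

static int show_pae(char *buf, int len, struct cpuinfo_arc_mmu *mmu)
{
	/* prints e.g. "PAE40: yes (not used) " when the hardware has
	 * PAE40 but CONFIG_ARC_HAS_PAE40 is off */
	return scnprintf(buf, len, "PAE40: %s%s\n",
			 IS_AVAIL2(mmu->pae, "yes ", CONFIG_ARC_HAS_PAE40));
}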
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 3845b9e94..991380438 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -45,12 +45,19 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
*
* @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @init_early_smp:	An SMP-specific h/w block can init itself
+ * Could be common across platforms so not covered by
+ * mach_desc->init_early()
+ * @init_per_cpu: Called for each core so SMP h/w block driver can do
+ * any needed setup per cpu (e.g. IPI request)
* @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
* @ipi_send: To send IPI to a @cpu
* @ipi_clear:	To clear IPI received at @irq
*/
struct plat_smp_ops {
const char *info;
+ void (*init_early_smp)(void);
+ void (*init_per_cpu)(int cpu);
void (*cpu_kick)(int cpu, unsigned long pc);
void (*ipi_send)(int cpu);
void (*ipi_clear)(int irq);
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index 71c7b2e4b..1fe9c8c80 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -17,6 +17,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
#ifndef CONFIG_SMP
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
@@ -24,6 +26,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
#else
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
@@ -31,5 +34,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
#endif /* CONFIG_SMP */
#endif
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
index 7ca628b6e..c11a25bb8 100644
--- a/arch/arc/include/asm/unwind.h
+++ b/arch/arc/include/asm/unwind.h
@@ -112,7 +112,6 @@ struct unwind_frame_info {
extern int arc_unwind(struct unwind_frame_info *frame);
extern void arc_unwind_init(void);
-extern void arc_unwind_setup(void);
extern void *unwind_add_table(struct module *module, const void *table_start,
unsigned long table_size);
extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
{
}
-static inline void arc_unwind_setup(void)
-{
-}
#define unwind_add_table(a, b, c)
#define unwind_remove_table(a, b)
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 9d129a2a1..059aff38f 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -9,6 +9,8 @@
#ifndef _UAPI__ASM_ARC_PAGE_H
#define _UAPI__ASM_ARC_PAGE_H
+#include <linux/const.h>
+
/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_ARC_PAGE_SIZE_16K)
#define PAGE_SHIFT 14
@@ -25,13 +27,8 @@
#define PAGE_SHIFT 13
#endif
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#define PAGE_OFFSET (0x80000000)
-#else
-#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
-#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
-#endif
+#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
+#define PAGE_OFFSET _AC(0x80000000, UL) /* Kernel starts at 2G onwards */
#define PAGE_MASK (~(PAGE_SIZE-1))
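_AC() and _BITUL() from linux/const.h are what let the old #ifdef __ASSEMBLY__ pair collapse into a single definition: the type suffix is appended only when compiling C. Spelled out:

/* In C:        _AC(0x80000000, UL) -> 0x80000000UL
 *              _BITUL(13)          -> (1UL << 13)
 * In assembly: _AC(0x80000000, UL) -> 0x80000000   (no UL suffix)
 *              _BITUL(13)          -> (1 << 13)
 */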
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0..5d446df2c 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
"st sp, [r24] \n\t"
#endif
- "sync \n\t"
-
/*
* setup _current_task with incoming tsk.
* optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e24859409..e6890b1f8 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
* don't need to do anything special to return it
*/
- /* hardware memory barrier */
- sync
-
/*
* switch to new task, contained in r1
* Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 8fa76567e..cbfec7913 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -24,7 +24,7 @@
.align 4
# Initial 16 slots are Exception Vectors
-VECTOR stext ; Restart Vector (jump to entry point)
+VECTOR res_service ; Reset Vector
VECTOR mem_service ; Mem exception
VECTOR instr_service ; Instrn Error
VECTOR EV_MachineCheck ; Fatal Machine check
@@ -91,6 +91,25 @@ ENTRY(EV_DCError)
flag 1
END(EV_DCError)
+; ---------------------------------------------
+; Memory Error Exception Handler
+; - Unlike ARCompact, handles Bus errors for both User/Kernel mode,
+; Instruction fetch or Data access, under a single Exception Vector
+; ---------------------------------------------
+
+ENTRY(mem_service)
+
+ EXCEPTION_PROLOGUE
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN
+
+ bl do_memory_error
+ b ret_from_exception
+END(mem_service)
+
ENTRY(EV_Misaligned)
EXCEPTION_PROLOGUE
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 15d457b44..431433929 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -86,7 +86,7 @@
*/
; ********* Critical System Events **********************
-VECTOR res_service ; 0x0, Restart Vector (0x0)
+VECTOR res_service ; 0x0, Reset Vector (0x0)
VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)
@@ -142,26 +142,18 @@ int1_saved_reg:
.zero 4
/* Each Interrupt level needs its own scratch */
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-
ARCFP_DATA int2_saved_reg
.type int2_saved_reg, @object
.size int2_saved_reg, 4
int2_saved_reg:
.zero 4
-#endif
-
; ---------------------------------------------
.section .text, "ax",@progbits
-res_service: ; processor restart
- flag 0x1 ; not implemented
- nop
- nop
-reserved: ; processor restart
- rtie ; jump to processor initializations
+reserved:
+ flag 1 ; Unexpected event, halt
;##################### Interrupt Handling ##############################
@@ -175,12 +167,25 @@ ENTRY(handle_interrupt_level2)
;------------------------------------------------------
; if L2 IRQ interrupted a L1 ISR, disable preemption
+ ;
+ ; This is to avoid a potential L1-L2-L1 scenario
+ ; -L1 IRQ taken
+ ; -L2 interrupts L1 (before L1 ISR could run)
+ ; -preemption off IRQ, user task in syscall picked to run
+ ; -RTIE to userspace
+ ; Returns from L2 context fine
+ ; But both L1 and L2 re-enabled, so another L1 can be taken
+ ; while prev L1 is still unserviced
+ ;
;------------------------------------------------------
+ ; L2 interrupting L1 implies both L2 and L1 active
+ ; However both A2 and A1 are NOT set in STATUS32, thus
+ ; need to check STATUS32_L2 to determine if L1 was active
+
ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
- ; A1 is set in status32_l2
; bump thread_info->preempt_count (Disable preemption)
GET_CURR_THR_INFO_FROM_SP r10
ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -207,6 +212,31 @@ END(handle_interrupt_level2)
#endif
; ---------------------------------------------
+; User Mode Memory Bus Error Interrupt Handler
+; (Kernel mode memory errors handled via separate exception vectors)
+; ---------------------------------------------
+ENTRY(mem_service)
+
+ INTERRUPT_PROLOGUE 2
+
+ mov r0, ilink2
+ mov r1, sp
+
+ ; User process needs to be killed with SIGBUS, but first need to get
+ ; out of the L2 interrupt context (drop to pure kernel mode) and jump
+	; off to "C" code where SIGBUS is enqueued
+ lr r3, [status32]
+ bclr r3, r3, STATUS_A2_BIT
+ or r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK)
+ sr r3, [status32_l2]
+ mov ilink2, 1f
+ rtie
+1:
+ bl do_memory_error
+ b ret_from_exception
+END(mem_service)
+
+; ---------------------------------------------
; Level 1 ISR
; ---------------------------------------------
ENTRY(handle_interrupt_level1)
@@ -320,11 +350,10 @@ END(call_do_page_fault)
; Note that we use realtime STATUS32 (not pt_regs->status32) to
; decide that.
- ; if Returning from Exception
- btst r10, STATUS_AE_BIT
- bnz .Lexcep_ret
+ and.f 0, r10, (STATUS_A1_MASK|STATUS_A2_MASK)
+ bz .Lexcep_or_pure_K_ret
- ; Not Exception so maybe Interrupts (Level 1 or 2)
+ ; Returning from Interrupts (Level 1 or 2)
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
@@ -365,8 +394,7 @@ END(call_do_page_fault)
st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
149:
- ;return from level 2
- INTERRUPT_EPILOGUE 2
+ INTERRUPT_EPILOGUE 2 ; return from level 2 interrupt
debug_marker_l2:
rtie
@@ -374,15 +402,11 @@ not_level2_interrupt:
#endif
- bbit0 r10, STATUS_A1_BIT, .Lpure_k_mode_ret
-
- ;return from level 1
- INTERRUPT_EPILOGUE 1
+ INTERRUPT_EPILOGUE 1 ; return from level 1 interrupt
debug_marker_l1:
rtie
-.Lexcep_ret:
-.Lpure_k_mode_ret:
+.Lexcep_or_pure_K_ret:
;this case is for syscalls or Exceptions or pure kernel mode
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 589abf517..2efb06253 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -93,23 +93,6 @@ ENTRY(instr_service)
END(instr_service)
; ---------------------------------------------
-; Memory Error Exception Handler
-; ---------------------------------------------
-
-ENTRY(mem_service)
-
- EXCEPTION_PROLOGUE
-
- lr r0, [efa]
- mov r1, sp
-
- FAKE_RET_FROM_EXCPN
-
- bl do_memory_error
- b ret_from_exception
-END(mem_service)
-
-; ---------------------------------------------
; Machine Check Exception Handler
; ---------------------------------------------
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 812f95e6a..689dd867f 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -50,28 +50,37 @@
.endm
.section .init.text, "ax",@progbits
- .type stext, @function
- .globl stext
-stext:
- ;-------------------------------------------------------------------
- ; Don't clobber r0-r2 yet. It might have bootloader provided info
- ;-------------------------------------------------------------------
+
+;----------------------------------------------------------------
+; Default Reset Handler (jumped into from Reset vector)
+; - Don't clobber r0,r1,r2 as they might have u-boot provided args
+; - Platforms can override this weak version if needed
+;----------------------------------------------------------------
+WEAK(res_service)
+ j stext
+END(res_service)
+
+;----------------------------------------------------------------
+; Kernel Entry point
+;----------------------------------------------------------------
+ENTRY(stext)
CPU_EARLY_SETUP
#ifdef CONFIG_SMP
- ; Ensure Boot (Master) proceeds. Others wait in platform dependent way
- ; IDENTITY Reg [ 3 2 1 0 ]
- ; (cpu-id) ^^^ => Zero for UP ARC700
- ; => #Core-ID if SMP (Master 0)
- ; Note that non-boot CPUs might not land here if halt-on-reset and
- ; instead breath life from @first_lines_of_secondary, but we still
- ; need to make sure only boot cpu takes this path.
GET_CPU_ID r5
cmp r5, 0
- mov.ne r0, r5
- jne arc_platform_smp_wait_to_boot
+ mov.nz r0, r5
+#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
+ ; Non-Master can proceed as system would be booted sufficiently
+ jnz first_lines_of_secondary
+#else
+ ; Non-Masters wait for Master to boot enough and bring them up
+ jnz arc_platform_smp_wait_to_boot
#endif
+ ; Master falls thru
+#endif
+
; Clear BSS before updating any globals
; XXX: use ZOL here
mov r5, __bss_start
@@ -102,18 +111,14 @@ stext:
GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
j start_kernel ; "C" entry point
+END(stext)
#ifdef CONFIG_SMP
;----------------------------------------------------------------
; First lines of code run by secondary before jumping to 'C'
;----------------------------------------------------------------
.section .text, "ax",@progbits
- .type first_lines_of_secondary, @function
- .globl first_lines_of_secondary
-
-first_lines_of_secondary:
-
- CPU_EARLY_SETUP
+ENTRY(first_lines_of_secondary)
; setup per-cpu idle task as "current" on this CPU
ld r0, [@secondary_idle_tsk]
@@ -126,5 +131,5 @@ first_lines_of_secondary:
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary
-
+END(first_lines_of_secondary)
#endif
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 26c156827..0394f9f61 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
- if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+ /*
+ * core intc IRQs [16, 23]:
+ * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+ */
+ if (hw < 24) {
+ /*
+ * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turn sets NOAUTOEN, meaning each core needs
+ * to call enable_percpu_irq()
+ */
+ irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
- else
+ } else {
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+ }
return 0;
}
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 039fac30b..06bcedf19 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -79,17 +79,16 @@ static struct irq_chip onchip_intc = {
static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
- /*
- * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core
- * code doesn't own it (like TIMER0). ISS IDU / ezchip define it
- * in platform header which can't be included here as it goes
- * against multi-platform image philisophy
- */
- if (irq == TIMER0_IRQ)
+ switch (irq) {
+ case TIMER0_IRQ:
+#ifdef CONFIG_SMP
+ case IPI_IRQ:
+#endif
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
- else
+ break;
+ default:
irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
-
+ }
return 0;
}
@@ -148,78 +147,15 @@ IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
void arch_local_irq_enable(void)
{
-
unsigned long flags = arch_local_save_flags();
- /* Allow both L1 and L2 at the onset */
- flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-
- /* Called from hard ISR (between irq_enter and irq_exit) */
- if (in_irq()) {
-
- /* If in L2 ISR, don't re-enable any further IRQs as this can
- * cause IRQ priorities to get upside down. e.g. it could allow
- * L1 be taken while in L2 hard ISR which is wrong not only in
- * theory, it can also cause the dreaded L1-L2-L1 scenario
- */
- if (flags & STATUS_A2_MASK)
- flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
-
- /* Even if in L1 ISR, allowe Higher prio L2 IRQs */
- else if (flags & STATUS_A1_MASK)
- flags &= ~(STATUS_E1_MASK);
- }
-
- /* called from soft IRQ, ideally we want to re-enable all levels */
-
- else if (in_softirq()) {
-
- /* However if this is case of L1 interrupted by L2,
- * re-enabling both may cause whaco L1-L2-L1 scenario
- * because ARC700 allows level 1 to interrupt an active L2 ISR
- * Thus we disable both
- * However some code, executing in soft ISR wants some IRQs
- * to be enabled so we re-enable L2 only
- *
- * How do we determine L1 intr by L2
- * -A2 is set (means in L2 ISR)
- * -E1 is set in this ISR's pt_regs->status32 which is
- * saved copy of status32_l2 when l2 ISR happened
- */
- struct pt_regs *pt = get_irq_regs();
-
- if ((flags & STATUS_A2_MASK) && pt &&
- (pt->status32 & STATUS_A1_MASK)) {
- /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
- flags &= ~(STATUS_E1_MASK);
- }
- }
+ if (flags & STATUS_A2_MASK)
+ flags |= STATUS_E2_MASK;
+ else if (flags & STATUS_A1_MASK)
+ flags |= STATUS_E1_MASK;
arch_local_irq_restore(flags);
}
-#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
-
-/*
- * Simpler version for only 1 level of interrupt
- * Here we only Worry about Level 1 Bits
- */
-void arch_local_irq_enable(void)
-{
- unsigned long flags;
-
- /*
- * ARC IDE Drivers tries to re-enable interrupts from hard-isr
- * context which is simply wrong
- */
- if (in_irq()) {
- WARN_ONCE(1, "IRQ enabled from hard-isr");
- return;
- }
-
- flags = arch_local_save_flags();
- flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
- arch_local_irq_restore(flags);
-}
-#endif
EXPORT_SYMBOL(arch_local_irq_enable);
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2989a7bcf..ba17f8528 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <asm/mach_desc.h>
+#include <asm/smp.h>
/*
* Late Interrupt system init called from start_kernel for Boot CPU only
@@ -19,17 +20,20 @@
*/
void __init init_IRQ(void)
{
- /* Any external intc can be setup here */
- if (machine_desc->init_irq)
- machine_desc->init_irq();
-
- /* process the entire interrupt tree in one go */
+ /*
+ * process the entire interrupt tree in one go
+	 * Any external intc will be set up provided the DT chains them
+	 * properly
+ */
irqchip_init();
#ifdef CONFIG_SMP
- /* Master CPU can initialize it's side of IPI */
- if (machine_desc->init_smp)
- machine_desc->init_smp(smp_processor_id());
+	/* an SMP H/w block could do the IPI IRQ request here */
+ if (plat_smp_ops.init_per_cpu)
+ plat_smp_ops.init_per_cpu(smp_processor_id());
+
+ if (machine_desc->init_per_cpu)
+ machine_desc->init_per_cpu(smp_processor_id());
#endif
}
@@ -47,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ * - For the boot CPU, actually requests the IRQ with the genirq core and enables it
+ * - For subsequent callers, only the local enable is done
+ *
+ * Relies on being called by the boot cpu first (i.e. request is done ahead of
+ * any enable) as expected by genirq. Hence suitable only for TIMER and IPI,
+ * which are guaranteed to be set up on the boot core first.
+ * Late-probed peripherals such as perf can't use this as there is no guarantee
+ * of being called on the boot CPU first.
+ */
+
void arc_request_percpu_irq(int irq, int cpu,
irqreturn_t (*isr)(int irq, void *dev),
const char *irq_nm,
@@ -56,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
if (!cpu) {
int rc;
+#ifdef CONFIG_ISA_ARCOMPACT
/*
- * These 2 calls are essential to making percpu IRQ APIs work
- * Ideally these details could be hidden in irq chip map function
- * but the issue is IPIs IRQs being static (non-DT) and platform
- * specific, so we can't identify them there.
+ * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turn sets NOAUTOEN, meaning each core needs
+ * to call enable_percpu_irq()
+ *
+ * For ARCv2, this is done in irq map function since we know
+ * which irqs are strictly per cpu
*/
irq_set_percpu_devid(irq);
- irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */
+#endif
rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
if (rc)
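[Editor's note] A compact sketch of the invariant the comment above describes (IRQ number and name are placeholders): request_percpu_irq() refuses a descriptor whose percpu_devid is not set, which is why the ARCompact path calls irq_set_percpu_devid() right before it.

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static int demo_setup_percpu(unsigned int irq, irq_handler_t isr,
				     void __percpu *pcpu_dev)
	{
		irq_set_percpu_devid(irq);	/* marks descriptor percpu + NOAUTOEN */
		return request_percpu_irq(irq, isr, "demo-percpu", pcpu_dev);
	}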
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 4ffd1855f..bd237acdf 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -12,20 +12,14 @@
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/mcip.h>
+#include <asm/setup.h>
static char smp_cpuinfo_buf[128];
static int idu_detected;
static DEFINE_RAW_SPINLOCK(mcip_lock);
-/*
- * Any SMP specific init any CPU does when it comes up.
- * Here we setup the CPU to enable Inter-Processor-Interrupts
- * Called for each CPU
- * -Master : init_IRQ()
- * -Other(s) : start_kernel_secondary()
- */
-void mcip_init_smp(unsigned int cpu)
+static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
}
@@ -96,34 +90,8 @@ static void mcip_ipi_clear(int irq)
#endif
}
-volatile int wake_flag;
-
-static void mcip_wakeup_cpu(int cpu, unsigned long pc)
-{
- BUG_ON(cpu == 0);
- wake_flag = cpu;
-}
-
-void arc_platform_smp_wait_to_boot(int cpu)
+static void mcip_probe_n_setup(void)
{
- while (wake_flag != cpu)
- ;
-
- wake_flag = 0;
- __asm__ __volatile__("j @first_lines_of_secondary \n");
-}
-
-struct plat_smp_ops plat_smp_ops = {
- .info = smp_cpuinfo_buf,
- .cpu_kick = mcip_wakeup_cpu,
- .ipi_send = mcip_ipi_send,
- .ipi_clear = mcip_ipi_clear,
-};
-
-void mcip_init_early_smp(void)
-{
-#define IS_AVAIL1(var, str) ((var) ? str : "")
-
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
@@ -161,6 +129,14 @@ void mcip_init_early_smp(void)
panic("kernel trying to use non-existent GRTC\n");
}
+struct plat_smp_ops plat_smp_ops = {
+ .info = smp_cpuinfo_buf,
+ .init_early_smp = mcip_probe_n_setup,
+ .init_per_cpu = mcip_setup_per_cpu,
+ .ipi_send = mcip_ipi_send,
+ .ipi_clear = mcip_ipi_clear,
+};
+
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
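[Editor's note] Piecing the hooks together, a simplified sketch of the call order this patch establishes (the wrapper function is illustrative, not from the source):

	/* boot CPU, once:          smp_init_cpus()          -> init_early_smp()
	 * every CPU, at bring-up:  init_IRQ() /
	 *                          start_kernel_secondary() -> init_per_cpu(cpu)
	 */
	static void demo_percpu_bringup(int cpu)
	{
		if (plat_smp_ops.init_per_cpu)
			plat_smp_ops.init_per_cpu(cpu);

		if (machine_desc->init_per_cpu)
			machine_desc->init_per_cpu(cpu);
	}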
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce..8b134cfe5 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
#endif /* CONFIG_ISA_ARCV2 */
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
{
- struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+ int irq = *(int *)data;
- arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
- "ARC perf counters", pmu_cpu);
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
/* Clear all pending interrupt flags */
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
if (has_interrupts) {
int irq = platform_get_irq(pdev, 0);
- unsigned long flags;
if (irq < 0) {
pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
arc_pmu->irq = irq;
- /*
- * arc_cpu_pmu_irq_init() needs to be called on all cores for
- * their respective local PMU.
- * However we use opencoded on_each_cpu() to ensure it is called
- * on core0 first, so that arc_request_percpu_irq() sets up
- * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
- * perf IRQ on non master cores.
- * see arc_request_percpu_irq()
- */
- preempt_disable();
- local_irq_save(flags);
- arc_cpu_pmu_irq_init();
- local_irq_restore(flags);
- smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
- preempt_enable();
-
- /* Clean all pending interrupt flags */
- write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+		/* intc map function ensures irq_set_percpu_devid() is called */
+ request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+ this_cpu_ptr(&arc_pmu_cpu));
+
+ on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+
} else
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
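[Editor's note] The on_each_cpu() idiom above runs the callback on every online CPU with IRQs disabled, passing each the same data pointer; a minimal sketch (callback name is hypothetical):

	#include <linux/interrupt.h>
	#include <linux/smp.h>

	static void demo_enable_irq(void *data)
	{
		enable_percpu_irq(*(int *)data, IRQ_TYPE_NONE);
	}

	static void demo_enable_everywhere(int irq)
	{
		on_each_cpu(demo_enable_irq, &irq, 1);	/* 1: wait for all CPUs */
	}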
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f..a3f750e76 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
- if (is_isa_arcompact()) {
- __asm__("sleep 0x3");
- } else {
- __asm__("sleep 0x10");
- }
+ __asm__ __volatile__(
+ "sleep %0 \n"
+ :
+ :"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */
}
asmlinkage void ret_from_fork(void);
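[Editor's note] On the "I" constraint above: ARC sleep encodes its argument as an immediate in the instruction word, so the operand must be a compile-time constant; a register ("r") operand would not assemble. A standalone sketch, reusing the 0x10 value visible in the replaced else-branch:

	#define DEMO_SLEEP_ARG	0x10	/* ARCv2 value from the old code */

	static inline void demo_cpu_sleep(void)
	{
		__asm__ __volatile__("sleep %0	\n" : : "I"(DEMO_SLEEP_ARG));
	}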
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index cabde9dc0..e1b87444e 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -160,10 +160,6 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
{ {0x00, NULL } }
};
-#define IS_AVAIL1(v, s) ((v) ? s : "")
-#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
-#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
-#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
@@ -415,8 +411,9 @@ void __init setup_arch(char **cmdline_p)
if (machine_desc->init_early)
machine_desc->init_early();
- setup_processor();
smp_init_cpus();
+
+ setup_processor();
setup_arch_memory();
/* copy flat DT out of .init and then unflatten it */
@@ -432,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
#endif
arc_unwind_init();
- arc_unwind_setup();
}
static int __init customize_machine(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index be13d1242..ef6e9e15b 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -42,8 +42,13 @@ void __init smp_prepare_boot_cpu(void)
}
/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
+ * Called from setup_arch() before calling setup_processor()
+ *
+ * - Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ * - Call early smp init hook. This can initialize a specific multi-core
+ *   IP which is, say, common to several platforms (hence not part of the
+ *   platform-specific init_early() hook)
*/
void __init smp_init_cpus(void)
{
@@ -51,6 +56,9 @@ void __init smp_init_cpus(void)
for (i = 0; i < NR_CPUS; i++)
set_cpu_possible(i, true);
+
+ if (plat_smp_ops.init_early_smp)
+ plat_smp_ops.init_early_smp();
}
/* called from init ( ) => process 1 */
@@ -72,35 +80,29 @@ void __init smp_cpus_done(unsigned int max_cpus)
}
/*
- * After power-up, a non Master CPU needs to wait for Master to kick start it
- *
- * The default implementation halts
- *
- * This relies on platform specific support allowing Master to directly set
- * this CPU's PC (to be @first_lines_of_secondary() and kick start it.
- *
- * In lack of such h/w assist, platforms can override this function
- * - make this function busy-spin on a token, eventually set by Master
- * (from arc_platform_smp_wakeup_cpu())
- * - Once token is available, jump to @first_lines_of_secondary
- * (using inline asm).
- *
- * Alert: can NOT use stack here as it has not been determined/setup for CPU.
- * If it turns out to be elaborate, it's better to code it in assembly
- *
+ * Default smp boot helper for Run-on-reset case where all cores start off
+ * together. Non-masters need to wait for Master to start running.
+ * This is implemented using a flag in memory, which Non-masters spin-wait on.
+ * Master sets it to the cpu-id of the core to "ungate" it.
*/
-void __weak arc_platform_smp_wait_to_boot(int cpu)
+static volatile int wake_flag;
+
+static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
- /*
- * As a hack for debugging - since debugger will single-step over the
- * FLAG insn - wrap the halt itself it in a self loop
- */
- __asm__ __volatile__(
- "1: \n"
- " flag 1 \n"
- " b 1b \n");
+ BUG_ON(cpu == 0);
+ wake_flag = cpu;
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+ while (wake_flag != cpu)
+ ;
+
+ wake_flag = 0;
+ __asm__ __volatile__("j @first_lines_of_secondary \n");
}
+
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
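[Editor's note] The run-on-reset handshake above, restated as a self-contained sketch (function names are illustrative):

	static volatile int demo_wake_flag;

	static void demo_master_kick(int cpu)	  /* cf. arc_default_smp_cpu_kick() */
	{
		demo_wake_flag = cpu;		  /* "ungate" exactly one core */
	}

	static void demo_secondary_wait(int cpu)  /* cf. arc_platform_smp_wait_to_boot() */
	{
		while (demo_wake_flag != cpu)
			;			  /* no stack yet: keep it simple */
		demo_wake_flag = 0;		  /* re-arm for the next core */
	}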
@@ -129,8 +131,12 @@ void start_kernel_secondary(void)
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
- if (machine_desc->init_smp)
- machine_desc->init_smp(cpu);
+ /* Some SMP H/w setup - for each cpu */
+ if (plat_smp_ops.init_per_cpu)
+ plat_smp_ops.init_per_cpu(cpu);
+
+ if (machine_desc->init_per_cpu)
+ machine_desc->init_per_cpu(cpu);
arc_local_timer_setup();
@@ -161,6 +167,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (plat_smp_ops.cpu_kick)
plat_smp_ops.cpu_kick(cpu,
(unsigned long)first_lines_of_secondary);
+ else
+ arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
/* wait for 1 sec after kicking the secondary */
wait_till = jiffies + HZ;
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4294761a2..dfad287f1 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -285,7 +285,4 @@ void __init time_init(void)
/* sets up the periodic event timer */
arc_local_timer_setup();
-
- if (machine_desc->init_time)
- machine_desc->init_time();
}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b..5eb707640 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
static unsigned long read_pointer(const u8 **pLoc,
const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+ void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for the header allocators (vs. choosing one or the other at each
+ * call site) to elide section mismatch warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+ return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+ MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+ return kmalloc(sz, GFP_KERNEL);
+}
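[Editor's note] The two wrappers differ only in allocator because the boot-time table is built before the slab is up, while module tables arrive once kmalloc() works; a sketch of the two call sites as this patch wires them (wrapper names are illustrative):

	void __init demo_boot_path(struct unwind_table *t)
	{
		init_unwind_hdr(t, unw_hdr_alloc_early);	/* bootmem */
	}

	static void demo_module_path(struct unwind_table *t)
	{
		init_unwind_hdr(t, unw_hdr_alloc);		/* kmalloc, GFP_KERNEL */
	}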
static void init_unwind_table(struct unwind_table *table, const char *name,
const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
__start_unwind, __end_unwind - __start_unwind,
NULL, 0);
/*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+ init_unwind_hdr(&root_table, unw_hdr_alloc_early);
}
static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
e2->fde = v;
}
-static void __init setup_unwind_table(struct unwind_table *table,
- void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+ void *(*alloc) (unsigned long))
{
const u8 *ptr;
unsigned long tableSize = table->size, hdrSize;
@@ -277,10 +296,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
if (cie == &not_fde)
continue;
if (cie == NULL || cie == &bad_cie)
- return;
+ goto ret_err;
ptrType = fde_pointer_type(cie);
if (ptrType < 0)
- return;
+ goto ret_err;
ptr = (const u8 *)(fde + 2);
if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -296,13 +315,15 @@ static void __init setup_unwind_table(struct unwind_table *table,
}
if (tableSize || !n)
- return;
+ goto ret_err;
hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ 2 * n * sizeof(unsigned long);
+
header = alloc(hdrSize);
if (!header)
- return;
+ goto ret_err;
+
header->version = 1;
header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -340,18 +361,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
table->hdrsz = hdrSize;
smp_wmb();
table->header = (const void *)header;
-}
-
-static void *__init balloc(unsigned long sz)
-{
- return __alloc_bootmem_nopanic(sz,
- sizeof(unsigned int),
- __pa(MAX_DMA_ADDRESS));
-}
+ return;
-void __init arc_unwind_setup(void)
-{
- setup_unwind_table(&root_table, balloc);
+ret_err:
+ panic("Attention !!! Dwarf FDE parsing errors\n");;
}
#ifdef CONFIG_MODULES
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
table_start, table_size,
NULL, 0);
+ init_unwind_hdr(table, unw_hdr_alloc);
+
#ifdef UNWIND_DEBUG
unw_debug("Table added for [%s] %lx %lx\n",
module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
info.init_only = init_only;
unlink_table(&info); /* XXX: SMP */
+ kfree(table->header);
kfree(table);
}
@@ -588,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
const u8 *ptr = (const u8 *)(cie + 2);
unsigned version = *ptr;
- if (version != 1)
- return -1; /* unsupported */
-
if (*++ptr) {
const char *aug;
const u8 *end = (const u8 *)(cie + 1) + *cie;
@@ -986,42 +999,13 @@ int arc_unwind(struct unwind_frame_info *frame)
(const u8 *)(fde +
1) +
*fde, ptrType);
- if (pc >= endLoc)
+ if (pc >= endLoc) {
fde = NULL;
- } else
- fde = NULL;
- }
- if (fde == NULL) {
- for (fde = table->address, tableSize = table->size;
- cie = NULL, tableSize > sizeof(*fde)
- && tableSize - sizeof(*fde) >= *fde;
- tableSize -= sizeof(*fde) + *fde,
- fde += 1 + *fde / sizeof(*fde)) {
- cie = cie_for_fde(fde, table);
- if (cie == &bad_cie) {
cie = NULL;
- break;
}
- if (cie == NULL
- || cie == &not_fde
- || (ptrType = fde_pointer_type(cie)) < 0)
- continue;
- ptr = (const u8 *)(fde + 2);
- startLoc = read_pointer(&ptr,
- (const u8 *)(fde + 1) +
- *fde, ptrType);
- if (!startLoc)
- continue;
- if (!(ptrType & DW_EH_PE_indirect))
- ptrType &=
- DW_EH_PE_FORM | DW_EH_PE_signed;
- endLoc =
- startLoc + read_pointer(&ptr,
- (const u8 *)(fde +
- 1) +
- *fde, ptrType);
- if (pc >= startLoc && pc < endLoc)
- break;
+ } else {
+ fde = NULL;
+ cie = NULL;
}
}
}
@@ -1031,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
ptr = (const u8 *)(cie + 2);
end = (const u8 *)(cie + 1) + *cie;
frame->call_frame = 1;
- if ((state.version = *ptr) != 1)
- cie = NULL; /* unsupported version */
- else if (*++ptr) {
+ if (*++ptr) {
/* check if augmentation size is first (thus present) */
if (*ptr == 'z') {
while (++ptr < end && *ptr) {
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index dd35bde39..894e696bd 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@
#include <asm/thread_info.h>
OUTPUT_ARCH(arc)
-ENTRY(_stext)
+ENTRY(res_service)
#ifdef CONFIG_CPU_BIG_ENDIAN
jiffies = jiffies_64 + 4;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 0cab0b8a5..f96c75edf 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -50,26 +50,26 @@ ENTRY(memcpy)
;;; if size <= 8
cmp r2, 8
- bls.d @smallchunk
+ bls.d @.Lsmallchunk
mov.f lp_count, r2
and.f r4, r0, 0x03
rsub lp_count, r4, 4
- lpnz @aligndestination
+ lpnz @.Laligndestination
;; LOOP BEGIN
ldb.ab r5, [r1,1]
sub r2, r2, 1
stb.ab r5, [r3,1]
-aligndestination:
+.Laligndestination:
;;; Check the alignment of the source
and.f r4, r1, 0x03
- bnz.d @sourceunaligned
+ bnz.d @.Lsourceunaligned
;;; CASE 0: Both source and destination are 32bit aligned
;;; Convert len to Dwords, unfold x4
lsr.f lp_count, r2, ZOLSHFT
- lpnz @copy32_64bytes
+ lpnz @.Lcopy32_64bytes
;; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
@@ -81,25 +81,25 @@ aligndestination:
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
-copy32_64bytes:
+.Lcopy32_64bytes:
and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
-smallchunk:
- lpnz @copyremainingbytes
+.Lsmallchunk:
+ lpnz @.Lcopyremainingbytes
;; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
-copyremainingbytes:
+.Lcopyremainingbytes:
j [blink]
;;; END CASE 0
-sourceunaligned:
+.Lsourceunaligned:
cmp r4, 2
- beq.d @unalignedOffby2
+ beq.d @.LunalignedOffby2
sub r2, r2, 1
- bhi.d @unalignedOffby3
+ bhi.d @.LunalignedOffby3
ldb.ab r5, [r1, 1]
;;; CASE 1: The source is unaligned, off by 1
@@ -114,7 +114,7 @@ sourceunaligned:
or r5, r5, r6
;; Both src and dst are aligned
- lpnz @copy8bytes_1
+ lpnz @.Lcopy8bytes_1
;; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
@@ -131,7 +131,7 @@ sourceunaligned:
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
-copy8bytes_1:
+.Lcopy8bytes_1:
;; Write back the remaining 16bits
EXTRACT_1 (r6, r5, 16)
@@ -141,14 +141,14 @@ copy8bytes_1:
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
- lpnz @copybytewise_1
+ lpnz @.Lcopybytewise_1
;; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
-copybytewise_1:
+.Lcopybytewise_1:
j [blink]
-unalignedOffby2:
+.LunalignedOffby2:
;;; CASE 2: The source is unaligned, off by 2
ldh.ab r5, [r1, 2]
sub r2, r2, 1
@@ -159,7 +159,7 @@ unalignedOffby2:
#ifdef __BIG_ENDIAN__
asl.nz r5, r5, 16
#endif
- lpnz @copy8bytes_2
+ lpnz @.Lcopy8bytes_2
;; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
@@ -176,7 +176,7 @@ unalignedOffby2:
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
-copy8bytes_2:
+.Lcopy8bytes_2:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 16
@@ -184,14 +184,14 @@ copy8bytes_2:
sth.ab r5, [r3, 2]
and.f lp_count, r2, 0x07 ;Last 8bytes
- lpnz @copybytewise_2
+ lpnz @.Lcopybytewise_2
;; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
-copybytewise_2:
+.Lcopybytewise_2:
j [blink]
-unalignedOffby3:
+.LunalignedOffby3:
;;; CASE 3: The source is unaligned, off by 3
;;; Hence, I need to read 1 byte to achieve the 32bit alignment
@@ -201,7 +201,7 @@ unalignedOffby3:
#ifdef __BIG_ENDIAN__
asl.ne r5, r5, 24
#endif
- lpnz @copy8bytes_3
+ lpnz @.Lcopy8bytes_3
;; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
@@ -218,7 +218,7 @@ unalignedOffby3:
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
-copy8bytes_3:
+.Lcopy8bytes_3:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 24
@@ -226,11 +226,11 @@ copy8bytes_3:
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
- lpnz @copybytewise_3
+ lpnz @.Lcopybytewise_3
;; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
-copybytewise_3:
+.Lcopybytewise_3:
j [blink]
END(memcpy)
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 7beb94155..3703a4969 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -8,3 +8,4 @@
obj-y := extable.o ioremap.o dma.o fault.o init.o
obj-y += tlb.o tlbex.o cache.o mmap.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 0d1a6e968..ff7ff6cbb 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -25,7 +25,7 @@ static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;
-void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
+void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int cacheop);
void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
@@ -37,7 +37,6 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
int n = 0;
struct cpuinfo_arc_cache *p;
-#define IS_USED_RUN(v) ((v) ? "" : "(disabled) ")
#define PR_CACHE(p, cfg, str) \
if (!(p)->ver) \
n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
@@ -47,7 +46,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
(p)->sz_k, (p)->assoc, (p)->line_len, \
(p)->vipt ? "VIPT" : "PIPT", \
(p)->alias ? " aliasing" : "", \
- IS_ENABLED(cfg) ? "" : " (not used)");
+ IS_USED_CFG(cfg));
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
@@ -63,7 +62,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
if (ioc_exists)
n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
- IS_USED_RUN(ioc_enable));
+ IS_DISABLED_RUN(ioc_enable));
return buf;
}
@@ -217,7 +216,7 @@ slc_chk:
*/
static inline
-void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
unsigned int aux_cmd;
@@ -254,8 +253,12 @@ void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
}
}
+/*
+ * For ARC700 MMUv3 I-cache and D-cache flushes
+ * Also reused for HS38 aliasing I-cache configuration
+ */
static inline
-void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
unsigned int aux_cmd, aux_tag;
@@ -290,6 +293,16 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
if (full_page)
write_aux_reg(aux_tag, paddr);
+ /*
+ * This is technically for MMU v4, using the MMU v3 programming model
+	 * Special work for HS38 aliasing I-cache configuration with PAE40
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and needs to be written before the lower 32 bits)
+ * Note that PTAG_HI is hoisted outside the line loop
+ */
+ if (is_pae40_enabled() && op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+
while (num_lines-- > 0) {
if (!full_page) {
write_aux_reg(aux_tag, paddr);
@@ -302,14 +315,20 @@ void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
}
/*
- * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
- * maintenance ops (in IVIL reg), as long as icache doesn't alias.
+ * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
+ * Here's how cache ops are implemented
+ *
+ * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
+ * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
+ * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
+ * respectively, similar to MMU v3 programming model, hence
+ * __cache_line_loop_v3() is used)
*
- * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
- * specified in PTAG (similar to MMU v3)
+ * If PAE40 is enabled, independent of aliasing considerations, the higher bits
+ * need to be written into PTAG_HI
*/
static inline
-void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int cacheop)
{
unsigned int aux_cmd;
@@ -336,6 +355,22 @@ void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+ /*
+ * For HS38 PAE40 configuration
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and needs to be written before the lower 32 bits)
+ */
+ if (is_pae40_enabled()) {
+ if (cacheop == OP_INV_IC)
+ /*
+ * Non aliasing I-cache in HS38,
+ * aliasing I-cache handled in __cache_line_loop_v3()
+ */
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
while (num_lines-- > 0) {
write_aux_reg(aux_cmd, paddr);
paddr += L1_CACHE_BYTES;
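[Editor's note] A condensed view of the PAE40 ordering rule used in the loop above (helper name is hypothetical): the top 8 bits of the 40-bit physical address must land in PTAG_HI before any write of the lower 32 bits.

	static void demo_set_ptag_hi(phys_addr_t paddr, int icache)
	{
		write_aux_reg(icache ? ARC_REG_IC_PTAG_HI : ARC_REG_DC_PTAG_HI,
			      (u64)paddr >> 32);	/* upper bits first ... */
		/* ... then the per-line writes of the lower 32 bits follow */
	}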
@@ -413,7 +448,7 @@ static inline void __dc_entire_op(const int op)
/*
* D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
*/
-static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
unsigned long flags;
@@ -446,7 +481,7 @@ static inline void __ic_entire_inv(void)
}
static inline void
-__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
unsigned long flags;
@@ -463,7 +498,7 @@ __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
#else
struct ic_inv_args {
- unsigned long paddr, vaddr;
+ phys_addr_t paddr, vaddr;
int sz;
};
@@ -474,7 +509,7 @@ static void __ic_line_inv_vaddr_helper(void *info)
__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
struct ic_inv_args ic_inv = {
@@ -495,7 +530,7 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
-noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
+noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -585,7 +620,7 @@ void flush_dcache_page(struct page *page)
} else if (page_mapped(page)) {
/* kernel reading from page with U-mapping */
- unsigned long paddr = (unsigned long)page_address(page);
+ phys_addr_t paddr = (unsigned long)page_address(page);
unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
if (addr_not_cache_congruent(paddr, vaddr))
@@ -733,14 +768,14 @@ EXPORT_SYMBOL(flush_icache_range);
* builtin kernel page will not have any virtual mappings.
* kprobe on loadable module will be kernel vaddr.
*/
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
__ic_line_inv_vaddr(paddr, vaddr, len);
}
/* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
+void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
@@ -749,7 +784,7 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
* wrapper to clearout kernel or userspace mappings of a page
* For kernel mappings @vaddr == @paddr
*/
-void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
+void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
@@ -807,8 +842,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
- unsigned long kfrom = (unsigned long)page_address(from);
- unsigned long kto = (unsigned long)page_address(to);
+ void *kfrom = kmap_atomic(from);
+ void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;
/*
@@ -818,13 +853,16 @@ void copy_user_highpage(struct page *to, struct page *from,
*
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
* equally valid for SRC page as well
+ *
+ * For !VIPT cache, all of this gets compiled out as
+ * addr_not_cache_congruent() is 0
*/
if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
- __flush_dcache_page(kfrom, u_vaddr);
+ __flush_dcache_page((unsigned long)kfrom, u_vaddr);
clean_src_k_mappings = 1;
}
- copy_page((void *)kto, (void *)kfrom);
+ copy_page(kto, kfrom);
/*
* Mark DST page K-mapping as dirty for a later finalization by
@@ -841,11 +879,14 @@ void copy_user_highpage(struct page *to, struct page *from,
* sync the kernel mapping back to physical page
*/
if (clean_src_k_mappings) {
- __flush_dcache_page(kfrom, kfrom);
+ __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
set_bit(PG_dc_clean, &from->flags);
} else {
clear_bit(PG_dc_clean, &from->flags);
}
+
+ kunmap_atomic(kto);
+ kunmap_atomic(kfrom);
}
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d948e4e9d..af63f4a13 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -18,7 +18,14 @@
#include <asm/pgalloc.h>
#include <asm/mmu.h>
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
* only copy the information from the master page table,
* nothing more.
*/
- if (address >= VMALLOC_START && address <= VMALLOC_END) {
- ret = handle_vmalloc_fault(address);
+ if (address >= VMALLOC_START) {
+ ret = handle_kernel_vaddr_fault(address);
if (unlikely(ret))
goto bad_area_nosemaphore;
else
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
new file mode 100644
index 000000000..92dd92cad
--- /dev/null
+++ b/arch/arc/mm/highmem.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * HIGHMEM API:
+ *
+ * kmap() API provides sleep semantics hence referred to as "permanent maps"
+ * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
+ * for book-keeping
+ *
+ * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
+ * short-lived a la "temporary mappings" which historically were implemented as
+ * fixmaps (compile time addr etc). Their book-keeping is done per cpu.
+ *
+ * Both these facts combined (preemption disabled and per-cpu allocation)
+ * mean the total number of concurrent fixmaps will be limited to the max
+ * such allocations in a single control path. Thus KM_TYPE_NR (another
+ * historic relic) is a small'ish number which caps max percpu fixmaps
+ *
+ * ARC HIGHMEM Details
+ *
+ * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
+ * is now shared between vmalloc and kmap (non overlapping though)
+ *
+ * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
+ * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ * slots across NR_CPUS would be more than sufficient (generic code defines
+ * KM_TYPE_NR as 20).
+ *
+ * - pkmap being preemptible, in theory could do with more than 256 concurrent
+ * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+ * the PGD and only works with a single page table @pkmap_page_table, hence
+ * sets the limit
+ */
+
+extern pte_t * pkmap_page_table;
+static pte_t * fixmap_page_table;
+
+void *kmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ return kmap_high(page);
+}
+
+void *kmap_atomic(struct page *page)
+{
+ int idx, cpu_idx;
+ unsigned long vaddr;
+
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ cpu_idx = kmap_atomic_idx_push();
+ idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+ vaddr = FIXMAP_ADDR(idx);
+
+ set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+ mk_pte(page, kmap_prot));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kv)
+{
+ unsigned long kvaddr = (unsigned long)kv;
+
+ if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+
+ /*
+ * Because preemption is disabled, this vaddr can be associated
+ * with the current allocated index.
+ * But in case of multiple live kmap_atomic(), it still relies on
+		 * callers to unmap in the right order.
+ */
+ int cpu_idx = kmap_atomic_idx();
+ int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+
+ WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+
+ pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+ local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
+{
+ pgd_t *pgd_k;
+ pud_t *pud_k;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ pgd_k = pgd_offset_k(kvaddr);
+ pud_k = pud_offset(pgd_k, kvaddr);
+ pmd_k = pmd_offset(pud_k, kvaddr);
+
+ pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd_k, pte_k);
+ return pte_k;
+}
+
+void __init kmap_init(void)
+{
+ /* Due to recursive include hell, we can't do this in processor.h */
+ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+
+ BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+}
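[Editor's note] A minimal usage sketch for the API implemented above (reader function is hypothetical); kunmap_atomic() must mirror the map exactly since slots are pushed and popped per cpu:

	#include <linux/highmem.h>

	static u32 demo_read_first_word(struct page *page)
	{
		u32 *va = kmap_atomic(page);	/* no sleeping until unmapped */
		u32 v = *va;

		kunmap_atomic(va);		/* pops the same per-cpu slot */
		return v;
	}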
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index d44eedd8c..7d2c4fbf4 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -15,6 +15,7 @@
#endif
#include <linux/swap.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
@@ -24,16 +25,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
EXPORT_SYMBOL(empty_zero_page);
-/* Default tot mem from .config */
-static unsigned long arc_mem_sz = 0x20000000; /* some default */
+static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
+static unsigned long low_mem_sz;
+
+#ifdef CONFIG_HIGHMEM
+static unsigned long min_high_pfn;
+static u64 high_mem_start;
+static u64 high_mem_sz;
+#endif
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{
- arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+ low_mem_sz = memparse(str, NULL) & PAGE_MASK;
/* early console might not be setup yet - it will show up later */
- pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
return 0;
}
@@ -41,8 +48,24 @@ early_param("mem", setup_mem_sz);
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
- arc_mem_sz = size & PAGE_MASK;
- pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+ int in_use = 0;
+
+ if (!low_mem_sz) {
+ if (base != low_mem_start)
+ panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
+
+ low_mem_sz = size;
+ in_use = 1;
+ } else {
+#ifdef CONFIG_HIGHMEM
+ high_mem_start = base;
+ high_mem_sz = size;
+ in_use = 1;
+#endif
+ }
+
+ pr_info("Memory @ %llx [%lldM] %s\n",
+ base, TO_MB(size), !in_use ? "Not used":"");
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -72,46 +95,62 @@ early_param("initrd", early_initrd);
void __init setup_arch_memory(void)
{
unsigned long zones_size[MAX_NR_ZONES];
- unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+ unsigned long zones_holes[MAX_NR_ZONES];
init_mm.start_code = (unsigned long)_text;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
- /*
- * We do it here, so that memory is correctly instantiated
- * even if "mem=xxx" cmline over-ride is given and/or
- * DT has memory node. Each causes an update to @arc_mem_sz
- * and we finally add memory one here
- */
- memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
-
- /*------------- externs in mm need setting up ---------------*/
-
/* first page of system - kernel .vector starts here */
min_low_pfn = ARCH_PFN_OFFSET;
- /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
- max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+ /* Last usable page of low mem */
+ max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
- max_mapnr = max_low_pfn - min_low_pfn;
+#ifdef CONFIG_HIGHMEM
+ min_high_pfn = PFN_DOWN(high_mem_start);
+ max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#endif
+
+ max_mapnr = max_pfn - min_low_pfn;
- /*------------- reserve kernel image -----------------------*/
- memblock_reserve(CONFIG_LINUX_LINK_BASE,
- __pa(_end) - CONFIG_LINUX_LINK_BASE);
+ /*------------- bootmem allocator setup -----------------------*/
+
+ /*
+ * seed the bootmem allocator after any DT memory node parsing or
+ * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
+ *
+ * Only low mem is added, otherwise we have crashes when allocating
+ * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
+ * avail memory, ending in highmem with a > 32-bit address. However
+	 * it then tries to memset it with a truncated 32-bit handle, causing
+ * the crash
+ */
+
+ memblock_add(low_mem_start, low_mem_sz);
+ memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
#ifdef CONFIG_BLK_DEV_INITRD
- /*------------- reserve initrd image -----------------------*/
if (initrd_start)
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif
memblock_dump_all();
- /*-------------- node setup --------------------------------*/
+ /*----------------- node/zones setup --------------------------*/
memset(zones_size, 0, sizeof(zones_size));
- zones_size[ZONE_NORMAL] = max_mapnr;
+ memset(zones_holes, 0, sizeof(zones_holes));
+
+ zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
+ zones_holes[ZONE_NORMAL] = 0;
+
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+
+ /* This handles the peripheral address space hole */
+ zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
+#endif
/*
* We can't use the helper free_area_init(zones[]) because it uses
@@ -122,9 +161,12 @@ void __init setup_arch_memory(void)
free_area_init_node(0, /* node-id */
zones_size, /* num pages per zone */
min_low_pfn, /* first pfn of node */
- NULL); /* NO holes */
+ zones_holes); /* holes */
- high_memory = (void *)end_mem;
+#ifdef CONFIG_HIGHMEM
+ high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+ kmap_init();
+#endif
}
/*
@@ -135,6 +177,14 @@ void __init setup_arch_memory(void)
*/
void __init mem_init(void)
{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ reset_all_zones_managed_pages();
+ for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+#endif
+
free_all_bootmem();
mem_init_print_info(NULL);
}
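[Editor's note] A worked example of the zone arithmetic above, with hypothetical sizes (8K pages, 256M of low mem at 0x8000_0000, 256M of high mem at 0x1_0000_0000):

	#define DEMO_PAGE_SHIFT	13			/* 8K pages */

	/* min_low_pfn  = 0x80000000  >> 13 = 0x40000
	 * max_low_pfn  = 0x90000000  >> 13 = 0x48000
	 * min_high_pfn = 0x100000000 >> 13 = 0x80000
	 * max_pfn      = 0x110000000 >> 13 = 0x88000
	 *
	 * ZONE_NORMAL  size = 0x48000 - 0x40000 = 0x08000 pages (256M)
	 * ZONE_HIGHMEM span = 0x88000 - 0x48000 = 0x40000 pages
	 * ZONE_HIGHMEM hole = 0x80000 - 0x48000 = 0x38000 pages
	 * managed highmem   = 0x40000 - 0x38000 = 0x08000 pages (256M)
	 */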
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 2c7ce8bb7..daf2bf52b 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -109,6 +109,10 @@ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
static inline void __tlb_entry_erase(void)
{
write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
write_aux_reg(ARC_REG_TLBPD0, 0);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
@@ -182,7 +186,7 @@ static void utlb_invalidate(void)
}
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
unsigned int idx;
@@ -225,10 +229,14 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
write_aux_reg(ARC_REG_TLBPD0, pd0);
write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+
write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}
@@ -240,22 +248,39 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
noinline void local_flush_tlb_all(void)
{
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
unsigned long flags;
unsigned int entry;
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ int num_tlb = mmu->sets * mmu->ways;
local_irq_save(flags);
/* Load PD0 and PD1 with template for a Blank Entry */
write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
write_aux_reg(ARC_REG_TLBPD0, 0);
- for (entry = 0; entry < mmu->num_tlb; entry++) {
+ for (entry = 0; entry < num_tlb; entry++) {
/* write this entry to the TLB */
write_aux_reg(ARC_REG_TLBINDEX, entry);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ const int stlb_idx = 0x800;
+
+ /* Blank sTLB entry */
+ write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
+
+ for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ }
+ }
+
utlb_invalidate();
local_irq_restore(flags);
@@ -409,6 +434,15 @@ static inline void ipi_flush_tlb_range(void *arg)
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ipi_flush_pmd_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+#endif
+
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
@@ -449,6 +483,20 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
+}
+#endif
+
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct tlb_args ta = {
@@ -463,11 +511,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
-void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
- unsigned long pd0, pd1;
+ unsigned long pd0;
+ pte_t pd1;
/*
* create_tlb() assumes that current->mm == vma->mm, since
@@ -499,9 +548,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
local_irq_save(flags);
- tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
+ tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
- address &= PAGE_MASK;
+ vaddr &= PAGE_MASK;
/* update this PTE credentials */
pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -511,7 +560,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
/* ASID for this task */
asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
- pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+ pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
/*
* ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -547,7 +596,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
- unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
@@ -570,16 +619,105 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
if (dirty) {
- /* wback + inv dcache lines */
+ /* wback + inv dcache lines (K-mapping) */
__flush_dcache_page(paddr, paddr);
- /* invalidate any existing icache lines */
+ /* invalidate any existing icache lines (U-mapping) */
if (vma->vm_flags & VM_EXEC)
__inv_icache_page(paddr, vaddr);
}
}
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/*
+ * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
+ * support.
+ *
+ * Normal and Super pages can co-exist (of course not overlap) in TLB with a
+ * new bit "SZ" in TLB page descriptor to distinguish between them.
+ * Super Page size is configurable in hardware (4K to 16M), but fixed once
+ * the RTL is built.
+ *
+ * The exact THP size a Linux configuration will support is a function of:
+ * - MMU page size (typical 8K, RTL fixed)
+ * - software page walker address split between PGD:PTE:PFN (typical
+ * 11:8:13, but can be changed with 1 line)
+ * So for above default, THP size supported is 8K * (2^8) = 2M
+ *
+ * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
+ * reduces to 1 level (as PTE is folded into PGD and canonically referred
+ * to as PMD).
+ * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
+ */
+
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd)
+{
+ pte_t pte = __pte(pmd_val(*pmd));
+ update_mmu_cache(vma, addr, &pte);
+}
+
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
+{
+ struct list_head *lh = (struct list_head *) pgtable;
+
+ assert_spin_locked(&mm->page_table_lock);
+
+ /* FIFO */
+ if (!pmd_huge_pte(mm, pmdp))
+ INIT_LIST_HEAD(lh);
+ else
+ list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+ pmd_huge_pte(mm, pmdp) = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+{
+ struct list_head *lh;
+ pgtable_t pgtable;
+
+ assert_spin_locked(&mm->page_table_lock);
+
+ pgtable = pmd_huge_pte(mm, pmdp);
+ lh = (struct list_head *) pgtable;
+ if (list_empty(lh))
+ pmd_huge_pte(mm, pmdp) = NULL;
+ else {
+ pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+ list_del(lh);
+ }
+
+ pte_val(pgtable[0]) = 0;
+ pte_val(pgtable[1]) = 0;
+
+ return pgtable;
+}
+
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned int cpu;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
+ unsigned int asid = hw_pid(vma->vm_mm, cpu);
+
+ /* No need to loop here: this will always be for 1 Huge Page */
+ tlb_entry_erase(start | _PAGE_HW_SZ | asid);
+ }
+
+ local_irq_restore(flags);
+}
+
+#endif
+
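[Editor's note] Generic THP code drives the deposit/withdraw pair above under mm->page_table_lock; a sketch of the lifecycle (wrapper names are hypothetical): on collapse the now-unused PTE page is parked, and on split it is taken back in FIFO order.

	static void demo_collapse(struct mm_struct *mm, pmd_t *pmdp,
				  pgtable_t pte_page)
	{
		spin_lock(&mm->page_table_lock);
		pgtable_trans_huge_deposit(mm, pmdp, pte_page);	/* park PTE page */
		spin_unlock(&mm->page_table_lock);
	}

	static pgtable_t demo_split(struct mm_struct *mm, pmd_t *pmdp)
	{
		pgtable_t pte_page;

		spin_lock(&mm->page_table_lock);
		pte_page = pgtable_trans_huge_withdraw(mm, pmdp);	/* FIFO */
		spin_unlock(&mm->page_table_lock);
		return pte_page;
	}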
/* Read the Cache Build Configuration Registers, decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
@@ -598,10 +736,10 @@ void read_decode_mmu_bcr(void)
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+ unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
- unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
ways:4, ver:8;
#endif
} *mmu3;
@@ -622,7 +760,7 @@ void read_decode_mmu_bcr(void)
if (mmu->ver <= 2) {
mmu2 = (struct bcr_mmu_1_2 *)&tmp;
- mmu->pg_sz_k = TO_KB(PAGE_SIZE);
+ mmu->pg_sz_k = TO_KB(0x2000);
mmu->sets = 1 << mmu2->sets;
mmu->ways = 1 << mmu2->ways;
mmu->u_dtlb = mmu2->u_dtlb;
@@ -634,6 +772,7 @@ void read_decode_mmu_bcr(void)
mmu->ways = 1 << mmu3->ways;
mmu->u_dtlb = mmu3->u_dtlb;
mmu->u_itlb = mmu3->u_itlb;
+ mmu->sasid = mmu3->sasid;
} else {
mmu4 = (struct bcr_mmu_4 *)&tmp;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -642,9 +781,9 @@ void read_decode_mmu_bcr(void)
mmu->ways = mmu4->n_ways * 2;
mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu->u_itlb = mmu4->u_itlb * 4;
+ mmu->sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
}
-
- mmu->num_tlb = mmu->sets * mmu->ways;
}
char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
@@ -655,14 +794,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page%s, ",
- p_mmu->s_pg_sz_m, " (not used)");
+ p_mmu->s_pg_sz_m,
+ IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
+ "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
- p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+ p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
+ IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}
@@ -690,6 +830,14 @@ void arc_mmu_init(void)
if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
+ panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
+ (unsigned long)TO_MB(HPAGE_PMD_SIZE));
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
+ panic("Hardware doesn't support PAE40\n");
+
/* Enable the MMU */
write_aux_reg(ARC_REG_PID, MMU_ENABLE);
@@ -725,15 +873,15 @@ void arc_mmu_init(void)
* the duplicate one.
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
*/
-volatile int dup_pd_verbose = 1;/* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent about it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
- int set, way, n;
- unsigned long flags, is_valid;
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int pd0[mmu->ways], pd1[mmu->ways];
+ unsigned int pd0[mmu->ways];
+ unsigned long flags;
+ int set;
local_irq_save(flags);
@@ -743,14 +891,16 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
+ int is_valid, way;
+
/* read out all the ways of current set */
for (way = 0, is_valid = 0; way < mmu->ways; way++) {
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
- pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
is_valid |= pd0[way] & _PAGE_PRESENT;
+ pd0[way] &= PAGE_MASK;
}
/* If all the WAYS in SET are empty, skip to next SET */
@@ -759,30 +909,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
/* Scan the set for duplicate ways: needs a nested loop */
for (way = 0; way < mmu->ways - 1; way++) {
+
+ int n;
+
if (!pd0[way])
continue;
for (n = way + 1; n < mmu->ways; n++) {
- if ((pd0[way] & PAGE_MASK) ==
- (pd0[n] & PAGE_MASK)) {
-
- if (dup_pd_verbose) {
- pr_info("Duplicate PD's @"
- "[%d:%d]/[%d:%d]\n",
- set, way, set, n);
- pr_info("TLBPD0[%u]: %08x\n",
- way, pd0[way]);
- }
-
- /*
- * clear entry @way and not @n. This is
- * critical to our optimised loop
- */
- pd0[way] = pd1[way] = 0;
- write_aux_reg(ARC_REG_TLBINDEX,
+ if (pd0[way] != pd0[n])
+ continue;
+
+ if (!dup_pd_silent)
+ pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
+ pd0[way], set, way, n);
+
+ /*
+ * clear entry @way and not @n.
+ * This is critical to our optimised loop
+ */
+ pd0[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
- __tlb_entry_erase();
- }
+ __tlb_entry_erase();
}
}
}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f6f4c3cb5..f1967eeb3 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -88,7 +88,7 @@ ex_saved_reg1:
#ifdef CONFIG_SMP
sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
GET_CPU_ID r0 ; get to per cpu scratch mem,
- lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
add r0, @ex_saved_reg1, r0
#else
st r0, [@ex_saved_reg1]
@@ -107,7 +107,7 @@ ex_saved_reg1:
.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
GET_CPU_ID r0 ; get to per cpu scratch mem
- lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
add r0, @ex_saved_reg1, r0
ld_s r3, [r0,12]
ld_s r2, [r0, 8]
@@ -205,20 +205,38 @@ ex_saved_reg1:
#endif
lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
- ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr
- and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags
- ; contains Ptr to Page Table
- bz.d do_slow_path_pf ; if no Page Table, do page fault
+ ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr
+ tst r3, r3
+ bz do_slow_path_pf ; if no Page Table, do page fault
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
+ add2.nz r1, r1, r0
+ bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
+ mov.nz r0, r3
+
+#endif
+ and r1, r3, PAGE_MASK
; Get the PTE entry: The idea is
; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
- ; (3) z = pgtbl[y]
- ; To avoid the multiply by in end, we do the -2, <<2 below
+ ; (3) z = (pgtbl + y * 4)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */
+#else
+#define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */
+#endif
+
+ ; multiply in step (3) above avoided by shifting lesser in step (1)
+ lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
+ and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
+ ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40)
+ ; r1: PTE ptr
+
+2:
- lsr r0, r2, (PAGE_SHIFT - 2)
- and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
- ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
and.f 0, r0, _PAGE_PRESENT
bz 1f
@@ -233,18 +251,23 @@ ex_saved_reg1:
;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; (for PAE40, two-words PTE, while three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE
.macro CONV_PTE_TO_TLB
- and r3, r0, PTE_BITS_RWX ; r w x
- lsl r2, r3, 3 ; r w x 0 0 0 (GLOBAL, kernel only)
+ and r3, r0, PTE_BITS_RWX ; r w x
+ asl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only)
and.f 0, r0, _PAGE_GLOBAL
- or.z r2, r2, r3 ; r w x r w x (!GLOBAL, user page)
+ or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
or r3, r3, r2
- sr r3, [ARC_REG_TLBPD1] ; these go in PD1
+ sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
+#ifdef CONFIG_ARC_HAS_PAE40
+ ld r3, [r1, 4] ; paddr[39..32]
+ sr r3, [ARC_REG_TLBPD1HI]
+#endif
and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
@@ -365,7 +388,7 @@ ENTRY(EV_TLBMissD)
lr r3, [ecr]
or r0, r0, _PAGE_ACCESSED ; Accessed bit always
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
- or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
+ or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
st_s r0, [r1] ; Write back PTE
CONV_PTE_TO_TLB
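[Editor's note] The index math in the TLB-miss fast path above, restated as standalone C (constants assume the default 11:8:13 split; names are illustrative): with PAE40 a PTE is 8 bytes (PTE_SIZE_LOG = 3), otherwise 4 bytes (PTE_SIZE_LOG = 2), and shifting less in step (1) folds in the multiply of step (3).

	#define DEMO_PAGE_SHIFT		13
	#define DEMO_PTRS_PER_PTE	256
	#define DEMO_PTE_SIZE_LOG	2	/* 4-byte PTE; 3 for PAE40 */

	static unsigned long demo_pte_byte_off(unsigned long vaddr)
	{
		/* shift less so the *sizeof(pte) multiply is already done */
		unsigned long x = vaddr >> (DEMO_PAGE_SHIFT - DEMO_PTE_SIZE_LOG);

		return x & ((DEMO_PTRS_PER_PTE - 1) << DEMO_PTE_SIZE_LOG);
	}
	/* equals ((vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) * sizeof(pte) */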
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 0a77b19e1..1b0f0f458 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -455,11 +455,6 @@ static void __init axs103_early_init(void)
axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card");
axs10x_early_init();
-
-#ifdef CONFIG_ARC_MCIP
- /* No Hardware init, but filling the smp ops callbacks */
- mcip_init_early_smp();
-#endif
}
#endif
@@ -487,9 +482,6 @@ static const char *axs103_compat[] __initconst = {
MACHINE_START(AXS103, "axs103")
.dt_compat = axs103_compat,
.init_early = axs103_early_init,
-#ifdef CONFIG_ARC_MCIP
- .init_smp = mcip_init_smp,
-#endif
MACHINE_END
/*
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c
index d9e35b4a2..e4fe51456 100644
--- a/arch/arc/plat-sim/platform.c
+++ b/arch/arc/plat-sim/platform.c
@@ -10,7 +10,6 @@
#include <linux/init.h>
#include <asm/mach_desc.h>
-#include <asm/mcip.h>
/*----------------------- Machine Descriptions ------------------------------
*
@@ -30,8 +29,4 @@ static const char *simulation_compat[] __initconst = {
MACHINE_START(SIMULATION, "simulation")
.dt_compat = simulation_compat,
-#ifdef CONFIG_ARC_MCIP
- .init_early = mcip_init_early_smp,
- .init_smp = mcip_init_smp,
-#endif
MACHINE_END