Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig           |  22
-rw-r--r--  arch/arm/mm/Makefile          |   1
-rw-r--r--  arch/arm/mm/alignment.c       |   2
-rw-r--r--  arch/arm/mm/cache-uniphier.c  | 555
-rw-r--r--  arch/arm/mm/context.c         |  38
-rw-r--r--  arch/arm/mm/dma-mapping.c     |   8
-rw-r--r--  arch/arm/mm/fault.c           |  22
-rw-r--r--  arch/arm/mm/fault.h           |   1
-rw-r--r--  arch/arm/mm/highmem.c         |  10
-rw-r--r--  arch/arm/mm/init.c            |  92
-rw-r--r--  arch/arm/mm/mmu.c             |   4
-rw-r--r--  arch/arm/mm/proc-v7.S         |   4
12 files changed, 688 insertions(+), 71 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index df7537f12..41218867a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
config CPU_32v3
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
a few prototypes like that in existence) and therefore access to
that required register must be emulated.
-config NEEDS_SYSCALL_FOR_CMPXCHG
- bool
- select NEED_KUSER_HELPERS
- help
- SMP on a pre-ARMv6 processor? Well OK then.
- Forget about fast user space cmpxchg support.
- It is just not possible.
-
config NEED_KUSER_HELPERS
bool
@@ -986,6 +974,16 @@ config CACHE_TAUROS2
This option enables the Tauros2 L2 cache controller (as
found on PJ1/PJ4).
+config CACHE_UNIPHIER
+ bool "Enable the UniPhier outer cache controller"
+ depends on ARCH_UNIPHIER
+ default y
+ select OUTER_CACHE
+ select OUTER_CACHE_SYNC
+ help
+ This option enables the UniPhier outer cache (system cache)
+ controller.
+
config CACHE_XSC3L2
bool "Enable the L2 cache on XScale3"
depends on CPU_XSC3
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 57c8df500..7f76d96ce 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -103,3 +103,4 @@ obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
+obj-$(CONFIG_CACHE_UNIPHIER) += cache-uniphier.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 00b7f7de2..7d5f4c736 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -803,7 +803,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
} else {
- fault = probe_kernel_address(instrptr, instr);
+ fault = probe_kernel_address((void *)instrptr, instr);
instr = __mem_to_opcode_arm(instr);
}
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
new file mode 100644
index 000000000..0502ba17a
--- /dev/null
+++ b/arch/arm/mm/cache-uniphier.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "uniphier: " fmt
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/hardware/cache-uniphier.h>
+#include <asm/outercache.h>
+
+/* control registers */
+#define UNIPHIER_SSCC 0x0 /* Control Register */
+#define UNIPHIER_SSCC_BST BIT(20) /* UCWG burst read */
+#define UNIPHIER_SSCC_ACT BIT(19) /* Inst-Data separate */
+#define UNIPHIER_SSCC_WTG BIT(18) /* WT gathering on */
+#define UNIPHIER_SSCC_PRD BIT(17) /* enable pre-fetch */
+#define UNIPHIER_SSCC_ON BIT(0) /* enable cache */
+#define UNIPHIER_SSCLPDAWCR 0x30 /* Unified/Data Active Way Control */
+#define UNIPHIER_SSCLPIAWCR 0x34 /* Instruction Active Way Control */
+
+/* revision registers */
+#define UNIPHIER_SSCID 0x0 /* ID Register */
+
+/* operation registers */
+#define UNIPHIER_SSCOPE 0x244 /* Cache Operation Primitive Entry */
+#define UNIPHIER_SSCOPE_CM_INV 0x0 /* invalidate */
+#define UNIPHIER_SSCOPE_CM_CLEAN 0x1 /* clean */
+#define UNIPHIER_SSCOPE_CM_FLUSH 0x2 /* flush */
+#define UNIPHIER_SSCOPE_CM_SYNC 0x8 /* sync (drain bufs) */
+#define UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
+#define UNIPHIER_SSCOQM 0x248 /* Cache Operation Queue Mode */
+#define UNIPHIER_SSCOQM_TID_MASK (0x3 << 21)
+#define UNIPHIER_SSCOQM_TID_LRU_DATA (0x0 << 21)
+#define UNIPHIER_SSCOQM_TID_LRU_INST (0x1 << 21)
+#define UNIPHIER_SSCOQM_TID_WAY (0x2 << 21)
+#define UNIPHIER_SSCOQM_S_MASK (0x3 << 17)
+#define UNIPHIER_SSCOQM_S_RANGE (0x0 << 17)
+#define UNIPHIER_SSCOQM_S_ALL (0x1 << 17)
+#define UNIPHIER_SSCOQM_S_WAY (0x2 << 17)
+#define UNIPHIER_SSCOQM_CE BIT(15) /* notify completion */
+#define UNIPHIER_SSCOQM_CM_INV 0x0 /* invalidate */
+#define UNIPHIER_SSCOQM_CM_CLEAN 0x1 /* clean */
+#define UNIPHIER_SSCOQM_CM_FLUSH 0x2 /* flush */
+#define UNIPHIER_SSCOQM_CM_PREFETCH 0x3 /* prefetch to cache */
+#define UNIPHIER_SSCOQM_CM_PREFETCH_BUF 0x4 /* prefetch to pf-buf */
+#define UNIPHIER_SSCOQM_CM_TOUCH 0x5 /* touch */
+#define UNIPHIER_SSCOQM_CM_TOUCH_ZERO 0x6 /* touch to zero */
+#define UNIPHIER_SSCOQM_CM_TOUCH_DIRTY 0x7 /* touch with dirty */
+#define UNIPHIER_SSCOQAD 0x24c /* Cache Operation Queue Address */
+#define UNIPHIER_SSCOQSZ 0x250 /* Cache Operation Queue Size */
+#define UNIPHIER_SSCOQMASK 0x254 /* Cache Operation Queue Address Mask */
+#define UNIPHIER_SSCOQWN 0x258 /* Cache Operation Queue Way Number */
+#define UNIPHIER_SSCOPPQSEF 0x25c /* Cache Operation Queue Set Complete*/
+#define UNIPHIER_SSCOPPQSEF_FE BIT(1)
+#define UNIPHIER_SSCOPPQSEF_OE BIT(0)
+#define UNIPHIER_SSCOLPQS 0x260 /* Cache Operation Queue Status */
+#define UNIPHIER_SSCOLPQS_EF BIT(2)
+#define UNIPHIER_SSCOLPQS_EST BIT(1)
+#define UNIPHIER_SSCOLPQS_QST BIT(0)
+
+/* Is the touch/pre-fetch destination specified by ways? */
+#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
+ ((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
+/* Is the operation region specified by address range? */
+#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
+ ((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
+
+/**
+ * uniphier_cache_data - UniPhier outer cache specific data
+ *
+ * @ctrl_base: virtual base address of control registers
+ * @rev_base: virtual base address of revision registers
+ * @op_base: virtual base address of operation registers
+ * @way_present_mask: each bit specifies if the way is present
+ * @way_locked_mask: each bit specifies if the way is locked
+ * @nsets: number of associativity sets
+ * @line_size: line size in bytes
+ * @range_op_max_size: max size that can be handled by a single range operation
+ * @list: list node to include this level in the whole cache hierarchy
+ */
+struct uniphier_cache_data {
+ void __iomem *ctrl_base;
+ void __iomem *rev_base;
+ void __iomem *op_base;
+ u32 way_present_mask;
+ u32 way_locked_mask;
+ u32 nsets;
+ u32 line_size;
+ u32 range_op_max_size;
+ struct list_head list;
+};
+
+/*
+ * List of the whole outer cache hierarchy. This list is only modified during
+ * the early boot stage, so no mutex is taken for the access to the list.
+ */
+static LIST_HEAD(uniphier_cache_list);
+
+/**
+ * __uniphier_cache_sync - perform a sync point for a particular cache level
+ *
+ * @data: cache controller specific data
+ */
+static void __uniphier_cache_sync(struct uniphier_cache_data *data)
+{
+ /* This sequence need not be atomic. Do not disable IRQ. */
+ writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
+ data->op_base + UNIPHIER_SSCOPE);
+ /* need a read back to confirm */
+ readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
+}
+
+/**
+ * __uniphier_cache_maint_common - run a queue operation for a particular level
+ *
+ * @data: cache controller specific data
+ * @start: start address of range operation (don't care for "all" operation)
+ * @size: data size of range operation (don't care for "all" operation)
+ * @operation: flags to specify the desired cache operation
+ */
+static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
+ unsigned long start,
+ unsigned long size,
+ u32 operation)
+{
+ unsigned long flags;
+
+ /*
+ * No spin lock is necessary here because:
+ *
+ * [1] This outer cache controller is able to accept maintenance
+ * operations from multiple CPUs at a time in an SMP system; if a
+ * maintenance operation is under way and another operation is issued,
+ * the new one is stored in the queue. The controller performs one
+ * operation after another. If the queue is full, the status register,
+ * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
+ * failed. The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
+ * different instances for each CPU, i.e. each CPU can track the status
+ * of the maintenance operations triggered by itself.
+ *
+ * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
+ * SSCOQWN}, are shared between multiple CPUs, but the hardware still
+ * guarantees the registration sequence is atomic; the write access to
+ * them are arbitrated by the hardware. The first accessor to the
+ * register, UNIPHIER_SSCOQM, holds the access right and it is released
+ * by reading the status register, UNIPHIER_SSCOPPQSEF. While one CPU
+ * is holding the access right, other CPUs fail to register operations.
+ * One CPU should not hold the access right for a long time, so local
+ * IRQs should be disabled during the following sequence.
+ */
+ local_irq_save(flags);
+
+ /* clear the complete notification flag */
+ writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);
+
+ do {
+ /* set cache operation */
+ writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
+ data->op_base + UNIPHIER_SSCOQM);
+
+ /* set address range if needed */
+ if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
+ writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
+ writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
+ }
+
+ /* set target ways if needed */
+ if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
+ writel_relaxed(data->way_locked_mask,
+ data->op_base + UNIPHIER_SSCOQWN);
+ } while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
+ (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
+
+ /* wait until the operation is completed */
+ while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
+ UNIPHIER_SSCOLPQS_EF))
+ cpu_relax();
+
+ local_irq_restore(flags);
+}
+
+static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
+ u32 operation)
+{
+ __uniphier_cache_maint_common(data, 0, 0,
+ UNIPHIER_SSCOQM_S_ALL | operation);
+
+ __uniphier_cache_sync(data);
+}
+
+static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
+ unsigned long start, unsigned long end,
+ u32 operation)
+{
+ unsigned long size;
+
+ /*
+ * If the start address is not aligned,
+ * perform a cache operation for the first cache-line
+ */
+ start = start & ~(data->line_size - 1);
+
+ size = end - start;
+
+ if (unlikely(size >= (unsigned long)(-data->line_size))) {
+ /* this means cache operation for all range */
+ __uniphier_cache_maint_all(data, operation);
+ return;
+ }
+
+ /*
+ * If the end address is not aligned,
+ * perform a cache operation for the last cache-line
+ */
+ size = ALIGN(size, data->line_size);
+
+ while (size) {
+ unsigned long chunk_size = min_t(unsigned long, size,
+ data->range_op_max_size);
+
+ __uniphier_cache_maint_common(data, start, chunk_size,
+ UNIPHIER_SSCOQM_S_RANGE | operation);
+
+ start += chunk_size;
+ size -= chunk_size;
+ }
+
+ __uniphier_cache_sync(data);
+}
+
+static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;
+
+ writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
+}
+
+static void __init __uniphier_cache_set_locked_ways(
+ struct uniphier_cache_data *data,
+ u32 way_mask)
+{
+ data->way_locked_mask = way_mask & data->way_present_mask;
+
+ writel_relaxed(~data->way_locked_mask & data->way_present_mask,
+ data->ctrl_base + UNIPHIER_SSCLPDAWCR);
+}
+
+static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
+ u32 operation)
+{
+ struct uniphier_cache_data *data;
+
+ list_for_each_entry(data, &uniphier_cache_list, list)
+ __uniphier_cache_maint_range(data, start, end, operation);
+}
+
+static void uniphier_cache_maint_all(u32 operation)
+{
+ struct uniphier_cache_data *data;
+
+ list_for_each_entry(data, &uniphier_cache_list, list)
+ __uniphier_cache_maint_all(data, operation);
+}
+
+static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
+{
+ uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
+}
+
+static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
+{
+ uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
+}
+
+static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
+{
+ uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
+}
+
+static void __init uniphier_cache_inv_all(void)
+{
+ uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
+}
+
+static void uniphier_cache_flush_all(void)
+{
+ uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
+}
+
+static void uniphier_cache_disable(void)
+{
+ struct uniphier_cache_data *data;
+
+ list_for_each_entry_reverse(data, &uniphier_cache_list, list)
+ __uniphier_cache_enable(data, false);
+
+ uniphier_cache_flush_all();
+}
+
+static void __init uniphier_cache_enable(void)
+{
+ struct uniphier_cache_data *data;
+
+ uniphier_cache_inv_all();
+
+ list_for_each_entry(data, &uniphier_cache_list, list) {
+ __uniphier_cache_enable(data, true);
+ __uniphier_cache_set_locked_ways(data, 0);
+ }
+}
+
+static void uniphier_cache_sync(void)
+{
+ struct uniphier_cache_data *data;
+
+ list_for_each_entry(data, &uniphier_cache_list, list)
+ __uniphier_cache_sync(data);
+}
+
+int __init uniphier_cache_l2_is_enabled(void)
+{
+ struct uniphier_cache_data *data;
+
+ data = list_first_entry_or_null(&uniphier_cache_list,
+ struct uniphier_cache_data, list);
+ if (!data)
+ return 0;
+
+ return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
+ UNIPHIER_SSCC_ON);
+}
+
+void __init uniphier_cache_l2_touch_range(unsigned long start,
+ unsigned long end)
+{
+ struct uniphier_cache_data *data;
+
+ data = list_first_entry_or_null(&uniphier_cache_list,
+ struct uniphier_cache_data, list);
+ if (data)
+ __uniphier_cache_maint_range(data, start, end,
+ UNIPHIER_SSCOQM_TID_WAY |
+ UNIPHIER_SSCOQM_CM_TOUCH);
+}
+
+void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
+{
+ struct uniphier_cache_data *data;
+
+ data = list_first_entry_or_null(&uniphier_cache_list,
+ struct uniphier_cache_data, list);
+ if (data)
+ __uniphier_cache_set_locked_ways(data, way_mask);
+}
+
+static const struct of_device_id uniphier_cache_match[] __initconst = {
+ {
+ .compatible = "socionext,uniphier-system-cache",
+ },
+ { /* sentinel */ }
+};
+
+static struct device_node * __init uniphier_cache_get_next_level_node(
+ struct device_node *np)
+{
+ u32 phandle;
+
+ if (of_property_read_u32(np, "next-level-cache", &phandle))
+ return NULL;
+
+ return of_find_node_by_phandle(phandle);
+}
+
+static int __init __uniphier_cache_init(struct device_node *np,
+ unsigned int *cache_level)
+{
+ struct uniphier_cache_data *data;
+ u32 level, cache_size;
+ struct device_node *next_np;
+ int ret = 0;
+
+ if (!of_match_node(uniphier_cache_match, np)) {
+ pr_err("L%d: not compatible with uniphier cache\n",
+ *cache_level);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(np, "cache-level", &level)) {
+ pr_err("L%d: cache-level is not specified\n", *cache_level);
+ return -EINVAL;
+ }
+
+ if (level != *cache_level) {
+ pr_err("L%d: cache-level is unexpected value %d\n",
+ *cache_level, level);
+ return -EINVAL;
+ }
+
+ if (!of_property_read_bool(np, "cache-unified")) {
+ pr_err("L%d: cache-unified is not specified\n", *cache_level);
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
+ !is_power_of_2(data->line_size)) {
+ pr_err("L%d: cache-line-size is unspecified or invalid\n",
+ *cache_level);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
+ !is_power_of_2(data->nsets)) {
+ pr_err("L%d: cache-sets is unspecified or invalid\n",
+ *cache_level);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (of_property_read_u32(np, "cache-size", &cache_size) ||
+ cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
+ pr_err("L%d: cache-size is unspecified or invalid\n",
+ *cache_level);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data->way_present_mask =
+ ((u32)1 << cache_size / data->nsets / data->line_size) - 1;
+
+ data->ctrl_base = of_iomap(np, 0);
+ if (!data->ctrl_base) {
+ pr_err("L%d: failed to map control register\n", *cache_level);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ data->rev_base = of_iomap(np, 1);
+ if (!data->rev_base) {
+ pr_err("L%d: failed to map revision register\n", *cache_level);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ data->op_base = of_iomap(np, 2);
+ if (!data->op_base) {
+ pr_err("L%d: failed to map operation register\n", *cache_level);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (*cache_level == 2) {
+ u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
+ /*
+ * The size of range operation is limited to (1 << 22) or less
+ * for PH-sLD8 or older SoCs.
+ */
+ if (revision <= 0x16)
+ data->range_op_max_size = (u32)1 << 22;
+ }
+
+ data->range_op_max_size -= data->line_size;
+
+ INIT_LIST_HEAD(&data->list);
+ list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */
+
+ /*
+ * OK, this level has been successfully initialized. Look for the next
+ * level cache. Do not roll back even if the initialization of the
+ * next level cache fails because we want to continue with available
+ * cache levels.
+ */
+ next_np = uniphier_cache_get_next_level_node(np);
+ if (next_np) {
+ (*cache_level)++;
+ ret = __uniphier_cache_init(next_np, cache_level);
+ }
+ of_node_put(next_np);
+
+ return ret;
+err:
+ iounmap(data->op_base);
+ iounmap(data->rev_base);
+ iounmap(data->ctrl_base);
+ kfree(data);
+
+ return ret;
+}
+
+int __init uniphier_cache_init(void)
+{
+ struct device_node *np = NULL;
+ unsigned int cache_level;
+ int ret = 0;
+
+ /* look for level 2 cache */
+ while ((np = of_find_matching_node(np, uniphier_cache_match)))
+ if (!of_property_read_u32(np, "cache-level", &cache_level) &&
+ cache_level == 2)
+ break;
+
+ if (!np)
+ return -ENODEV;
+
+ ret = __uniphier_cache_init(np, &cache_level);
+ of_node_put(np);
+
+ if (ret) {
+ /*
+ * Error out only if L2 initialization fails. Continue with any
+ * error on L3 or outer because they are optional.
+ */
+ if (cache_level == 2) {
+ pr_err("failed to initialize L2 cache\n");
+ return ret;
+ }
+
+ cache_level--;
+ ret = 0;
+ }
+
+ outer_cache.inv_range = uniphier_cache_inv_range;
+ outer_cache.clean_range = uniphier_cache_clean_range;
+ outer_cache.flush_range = uniphier_cache_flush_range;
+ outer_cache.flush_all = uniphier_cache_flush_all;
+ outer_cache.disable = uniphier_cache_disable;
+ outer_cache.sync = uniphier_cache_sync;
+
+ uniphier_cache_enable();
+
+ pr_info("enabled outer cache (cache level: %d)\n", cache_level);
+
+ return ret;
+}
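
A standalone sketch (not part of the patch above) of the range-splitting arithmetic used by __uniphier_cache_maint_range(): the start is aligned down to a cache line, the size is rounded up, and the work is issued in chunks no larger than range_op_max_size. The line size and chunk cap below are illustrative values, and the queue-register writes are replaced by a printf so the sketch compiles and runs in user space.

#include <stdio.h>

#define LINE_SIZE     128UL                        /* example; the real value comes from the "cache-line-size" DT property */
#define RANGE_OP_MAX  ((1UL << 22) - LINE_SIZE)    /* example cap, as for PH-sLD8 and older SoCs */

/* Mimic the chunking loop: cover partial first/last lines, then chunk. */
static void maint_range(unsigned long start, unsigned long end)
{
	unsigned long size;

	start &= ~(LINE_SIZE - 1);                           /* align down to a line */
	size = end - start;
	size = (size + LINE_SIZE - 1) & ~(LINE_SIZE - 1);    /* round up to a line */

	while (size) {
		unsigned long chunk = size < RANGE_OP_MAX ? size : RANGE_OP_MAX;

		printf("range op: start=0x%08lx size=0x%08lx\n", start, chunk);
		start += chunk;
		size -= chunk;
	}
}

int main(void)
{
	maint_range(0x80000010, 0x80801000);    /* deliberately unaligned at both ends */
	return 0;
}
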
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 845769e41..c8c8b9ed0 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
__flush_icache_all();
}
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
int cpu;
- for_each_possible_cpu(cpu)
- if (per_cpu(reserved_asids, cpu) == asid)
- return 1;
- return 0;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved ASIDs looking for a match.
+ * If we find one, then we can update our mm to use newasid
+ * (i.e. the same ASID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old ASID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved ASID in a future
+ * generation.
+ */
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_asids, cpu) == asid) {
+ hit = true;
+ per_cpu(reserved_asids, cpu) = newasid;
+ }
+ }
+
+ return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK);
+
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
- if (is_reserved_asid(asid))
- return generation | (asid & ~ASID_MASK);
+ if (check_update_reserved_asid(asid, newasid))
+ return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
*/
asid &= ~ASID_MASK;
if (!__test_and_set_bit(asid, asid_map))
- goto bump_gen;
+ return newasid;
}
/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
__set_bit(asid, asid_map);
cur_idx = asid;
-
-bump_gen:
- asid |= generation;
cpumask_clear(mm_cpumask(mm));
- return asid;
+ return asid | generation;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
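
A user-space sketch (not part of the patch above) of the bit layout new_context() relies on: the 64-bit software ASID carries the rollover generation in its upper bits and the hardware ASID below ASID_MASK, so "generation | (asid & ~ASID_MASK)" re-stamps an old ASID with the current generation. The 8-bit hardware ASID width is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 8                        /* assumed width of the hardware ASID field */
#define ASID_MASK (~0ULL << ASID_BITS)     /* everything above it is the generation */

int main(void)
{
	uint64_t asid = (3ULL << ASID_BITS) | 0x42;   /* allocated in generation 3, hw ASID 0x42 */
	uint64_t generation = 5ULL << ASID_BITS;      /* current generation after two rollovers */

	/* What the patched new_context() computes up front as "newasid". */
	uint64_t newasid = generation | (asid & ~ASID_MASK);

	printf("old %#llx -> new %#llx (hw ASID %#llx kept)\n",
	       (unsigned long long)asid, (unsigned long long)newasid,
	       (unsigned long long)(asid & ~ASID_MASK));
	return 0;
}
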
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ad4eb2d26..534a60ae2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -651,12 +651,12 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+ else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
addr = __alloc_from_contiguous(dev, size, prot, &page,
caller, want_vaddr);
else if (is_coherent)
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- else if (!(gfp & __GFP_WAIT))
+ else if (!gfpflags_allow_blocking(gfp))
addr = __alloc_from_pool(size, &page);
else
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
@@ -1363,7 +1363,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (!(gfp & __GFP_WAIT))
+ if (!gfpflags_allow_blocking(gfp))
return __iommu_alloc_atomic(dev, size, handle);
/*
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
- phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+ phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
if (!is_coherent &&
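
For context (not part of the patch above): these hunks follow the mm rework around v4.4 that replaced __GFP_WAIT with __GFP_DIRECT_RECLAIM, tested through gfpflags_allow_blocking(). A self-contained sketch of that helper with stand-in flag values (the real definitions live in include/linux/gfp.h):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;
#define __GFP_DIRECT_RECLAIM ((gfp_t)0x400000u)    /* made-up bit value for the demo */
#define GFP_KERNEL           __GFP_DIRECT_RECLAIM  /* simplified: an allocation that may sleep */
#define GFP_ATOMIC           ((gfp_t)0u)           /* simplified: must not sleep */

/* Same shape as the kernel helper: "may this allocation block?" */
static bool gfpflags_allow_blocking(gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

int main(void)
{
	printf("GFP_KERNEL may block: %d\n", gfpflags_allow_blocking(GFP_KERNEL));
	printf("GFP_ATOMIC may block: %d\n", gfpflags_allow_blocking(GFP_ATOMIC));
	return 0;
}
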
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0d629b8f9..daafcf121 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, ifsr, 0);
}
+/*
+ * Abort handler to be used only during first unmasking of asynchronous aborts
+ * on the boot CPU. This makes sure that the machine will not die if the
+ * firmware/bootloader left an imprecise abort pending for us to trip over.
+ */
+static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
+ "first unmask, this is most likely caused by a "
+ "firmware/bootloader bug.\n", fsr);
+
+ return 0;
+}
+
+void __init early_abt_enable(void)
+{
+ fsr_info[22].fn = early_abort_handler;
+ local_abt_enable();
+ fsr_info[22].fn = do_bad;
+}
+
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index cf08bdfbe..05ec5e0df 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
+void early_abt_enable(void);
#endif /* __ARCH_ARM_FAULT_H */
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 9df5f0958..d02f8187b 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -147,13 +147,3 @@ void *kmap_atomic_pfn(unsigned long pfn)
return (void *)vaddr;
}
-
-struct page *kmap_atomic_to_page(const void *ptr)
-{
- unsigned long vaddr = (unsigned long)ptr;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- return pte_page(get_fixmap_pte(vaddr));
-}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a63b4cdc..7f8cd1b35 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
+#include <linux/stop_machine.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
* safe to be called with preemption disabled, as under stop_machine().
*/
static inline void section_update(unsigned long addr, pmdval_t mask,
- pmdval_t prot)
+ pmdval_t prot, struct mm_struct *mm)
{
- struct mm_struct *mm;
pmd_t *pmd;
- mm = current->active_mm;
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP);
}
-#define set_section_perms(perms, field) { \
- size_t i; \
- unsigned long addr; \
- \
- if (!arch_has_strict_perms()) \
- return; \
- \
- for (i = 0; i < ARRAY_SIZE(perms); i++) { \
- if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
- !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
- pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
- perms[i].start, perms[i].end, \
- SECTION_SIZE); \
- continue; \
- } \
- \
- for (addr = perms[i].start; \
- addr < perms[i].end; \
- addr += SECTION_SIZE) \
- section_update(addr, perms[i].mask, \
- perms[i].field); \
- } \
+void set_section_perms(struct section_perm *perms, int n, bool set,
+ struct mm_struct *mm)
+{
+ size_t i;
+ unsigned long addr;
+
+ if (!arch_has_strict_perms())
+ return;
+
+ for (i = 0; i < n; i++) {
+ if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+ !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+ pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+ perms[i].start, perms[i].end,
+ SECTION_SIZE);
+ continue;
+ }
+
+ for (addr = perms[i].start;
+ addr < perms[i].end;
+ addr += SECTION_SIZE)
+ section_update(addr, perms[i].mask,
+ set ? perms[i].prot : perms[i].clear, mm);
+ }
+
}
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
{
- set_section_perms(nx_perms, prot);
+ struct task_struct *t, *s;
+
+ read_lock(&tasklist_lock);
+ for_each_process(t) {
+ if (t->flags & PF_KTHREAD)
+ continue;
+ for_each_thread(t, s)
+ set_section_perms(perms, n, true, s->mm);
+ }
+ read_unlock(&tasklist_lock);
+ set_section_perms(perms, n, true, current->active_mm);
+ set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+ update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+ return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+ stop_machine(__fix_kernmem_perms, NULL, NULL);
}
#ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+ update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+ return 0;
+}
+
void mark_rodata_ro(void)
{
- set_section_perms(ro_perms, prot);
+ stop_machine(__mark_rodata_ro, NULL, NULL);
}
void set_kernel_text_rw(void)
{
- set_section_perms(ro_perms, clear);
+ set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+ current->active_mm);
}
void set_kernel_text_ro(void)
{
- set_section_perms(ro_perms, prot);
+ set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+ current->active_mm);
}
#endif /* CONFIG_DEBUG_RODATA */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7cd15143a..4867f5daf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -38,6 +38,7 @@
#include <asm/mach/pci.h>
#include <asm/fixmap.h>
+#include "fault.h"
#include "mm.h"
#include "tcm.h"
@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
*/
local_flush_tlb_all();
flush_cache_all();
+
+ /* Enable asynchronous aborts */
+ early_abt_enable();
}
static void __init kmap_init(void)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de2b246fe..8e1ea433c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
- stmfd sp!, {r4 - r10, lr}
+ stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
stmia r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
- ldmfd sp!, {r4 - r10, pc}
+ ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
ENTRY(cpu_v7_do_resume)