author	André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-08-05 17:04:01 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2015-08-05 17:04:01 -0300
commit	57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree	5e910f0e82173f4ef4f51111366a3f1299037a7b /arch/arm/mach-vexpress
Initial import
Diffstat (limited to 'arch/arm/mach-vexpress')
-rw-r--r--	arch/arm/mach-vexpress/Kconfig		 82
-rw-r--r--	arch/arm/mach-vexpress/Makefile		 17
-rw-r--r--	arch/arm/mach-vexpress/core.h		  5
-rw-r--r--	arch/arm/mach-vexpress/dcscb.c		174
-rw-r--r--	arch/arm/mach-vexpress/dcscb_setup.S	 38
-rw-r--r--	arch/arm/mach-vexpress/hotplug.c	106
-rw-r--r--	arch/arm/mach-vexpress/platsmp.c	 74
-rw-r--r--	arch/arm/mach-vexpress/spc.c		592
-rw-r--r--	arch/arm/mach-vexpress/spc.h		 25
-rw-r--r--	arch/arm/mach-vexpress/tc2_pm.c		266
-rw-r--r--	arch/arm/mach-vexpress/v2m.c		 16
11 files changed, 1395 insertions, 0 deletions
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
new file mode 100644
index 000000000..10f938957
--- /dev/null
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -0,0 +1,82 @@
+menuconfig ARCH_VEXPRESS
+ bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
+ select ARCH_REQUIRE_GPIOLIB
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_AMBA
+ select ARM_GIC
+ select ARM_GLOBAL_TIMER
+ select ARM_TIMER_SP804
+ select COMMON_CLK_VERSATILE
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select HAVE_PATA_PLATFORM
+ select ICST
+ select NO_IOPORT_MAP
+ select PLAT_VERSATILE
+ select POWER_RESET
+ select POWER_RESET_VEXPRESS
+ select POWER_SUPPLY
+ select REGULATOR if MMC_ARMMMCI
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
+ select VEXPRESS_CONFIG
+ select VEXPRESS_SYSCFG
+ select MFD_VEXPRESS_SYSREG
+ help
+ This option enables support for systems using Cortex processor-based
+ ARM core and logic (FPGA) tiles on the Versatile Express motherboard,
+ for example:
+
+ - CoreTile Express A5x2 (V2P-CA5s)
+ - CoreTile Express A9x4 (V2P-CA9)
+ - CoreTile Express A15x2 (V2P-CA15)
+ - LogicTile Express 13MG (V2F-2XV6) with A5, A7, A9 or A15 SMMs
+ (Soft Macrocell Models)
+ - Versatile Express RTSMs (Models)
+
+ You must boot using a Flattened Device Tree in order to use these
+ platforms. The traditional (ATAGs) boot method is not usable on
+ these boards with this option.
+
+if ARCH_VEXPRESS
+
+config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
+ bool "Enable A5 and A9 only errata work-arounds"
+ default y
+ select ARM_ERRATA_643719 if SMP
+ select ARM_ERRATA_720789
+ select PL310_ERRATA_753970 if CACHE_L2X0
+ help
+ Provides common dependencies for Versatile Express platforms
+ based on Cortex-A5 and Cortex-A9 processors. In order to
+ build a working kernel, you must also enable relevant core
+ tile support or Flattened Device Tree based support options.
+
+config ARCH_VEXPRESS_DCSCB
+ bool "Dual Cluster System Control Block (DCSCB) support"
+ depends on MCPM
+ select ARM_CCI400_PORT_CTRL
+ help
+ Support for the Dual Cluster System Configuration Block (DCSCB).
+ This is needed to provide CPU and cluster power management
+ on RTSM implementing big.LITTLE.
+
+config ARCH_VEXPRESS_SPC
+ bool "Versatile Express Serial Power Controller (SPC)"
+ select PM_OPP
+ help
+ The TC2 (A15x2 A7x3) Versatile Express core tile integrates a logic
+ block called Serial Power Controller (SPC) that provides the interface
+ between the dual cluster test-chip and the M3 microcontroller that
+ carries out power management.
+
+config ARCH_VEXPRESS_TC2_PM
+ bool "Versatile Express TC2 power management"
+ depends on MCPM
+ select ARM_CCI400_PORT_CTRL
+ select ARCH_VEXPRESS_SPC
+ select ARM_CPU_SUSPEND
+ help
+ Support for CPU and cluster power management on Versatile Express
+ with a TC2 (A15x2 A7x3) big.LITTLE core tile.
+
+endif
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
new file mode 100644
index 000000000..f5c1006dd
--- /dev/null
+++ b/arch/arm/mach-vexpress/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the linux kernel.
+#
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := \
+ -I$(srctree)/arch/arm/plat-versatile/include
+
+obj-y := v2m.o
+obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
+CFLAGS_dcscb.o += -march=armv7-a
+CFLAGS_REMOVE_dcscb.o = -pg
+obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
+CFLAGS_REMOVE_spc.o = -pg
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
+CFLAGS_tc2_pm.o += -march=armv7-a
+CFLAGS_REMOVE_tc2_pm.o = -pg
+obj-$(CONFIG_SMP) += platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
new file mode 100644
index 000000000..2a11d3ac8
--- /dev/null
+++ b/arch/arm/mach-vexpress/core.h
@@ -0,0 +1,5 @@
+bool vexpress_smp_init_ops(void);
+
+extern struct smp_operations vexpress_smp_dt_ops;
+
+extern void vexpress_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
new file mode 100644
index 000000000..5cedcf572
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -0,0 +1,174 @@
+/*
+ * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
+ *
+ * Created by: Nicolas Pitre, May 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+
+
+#define RST_HOLD0 0x0
+#define RST_HOLD1 0x4
+#define SYS_SWRESET 0x8
+#define RST_STAT0 0xc
+#define RST_STAT1 0x10
+#define EAG_CFG_R 0x20
+#define EAG_CFG_W 0x24
+#define KFC_CFG_R 0x28
+#define KFC_CFG_W 0x2c
+#define DCS_CFG_R 0x30
+
+static void __iomem *dcscb_base;
+static int dcscb_allcpus_mask[2];
+
+static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int rst_hold, cpumask = (1 << cpu);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
+ return -EINVAL;
+
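+ /* Clear this CPU's bits in both reset-hold fields (bits [3:0] and [7:4]) so it comes out of reset. */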
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold &= ~(cpumask | (cpumask << 4));
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ return 0;
+}
+
+static int dcscb_cluster_powerup(unsigned int cluster)
+{
+ unsigned int rst_hold;
+
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= 2)
+ return -EINVAL;
+
+ /* remove the cluster-wide reset and instead hold the individual CPUs in reset */
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold &= ~(1 << 8);
+ rst_hold |= dcscb_allcpus_mask[cluster];
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ return 0;
+}
+
+static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int rst_hold;
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
+
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= (1 << cpu);
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+}
+
+static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
+{
+ unsigned int rst_hold;
+
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= 2);
+
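+ /* Bit 8 of RST_HOLDx holds the whole cluster in reset. */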
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= (1 << 8);
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+}
+
+static void dcscb_cpu_cache_disable(void)
+{
+ /* Disable and flush the local CPU cache. */
+ v7_exit_coherency_flush(louis);
+}
+
+static void dcscb_cluster_cache_disable(void)
+{
+ /* Flush all cache levels for this cluster. */
+ v7_exit_coherency_flush(all);
+
+ /*
+ * A full outer cache flush could be needed at this point
+ * on platforms with such a cache, depending on where the
+ * outer cache sits. In some cases the notion of a "last
+ * cluster standing" would need to be implemented if the
+ * outer cache is shared across clusters. In any case, when
+ * the outer cache needs flushing, there is no concurrent
+ * access to the cache controller to worry about and no
+ * special locking besides what is already provided by the
+ * MCPM state machinery is needed.
+ */
+
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
+}
+
+static const struct mcpm_platform_ops dcscb_power_ops = {
+ .cpu_powerup = dcscb_cpu_powerup,
+ .cluster_powerup = dcscb_cluster_powerup,
+ .cpu_powerdown_prepare = dcscb_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
+ .cpu_cache_disable = dcscb_cpu_cache_disable,
+ .cluster_cache_disable = dcscb_cluster_cache_disable,
+};
+
+extern void dcscb_power_up_setup(unsigned int affinity_level);
+
+static int __init dcscb_init(void)
+{
+ struct device_node *node;
+ unsigned int cfg;
+ int ret;
+
+ if (!cci_probed())
+ return -ENODEV;
+
+ node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
+ if (!node)
+ return -ENODEV;
+ dcscb_base = of_iomap(node, 0);
+ if (!dcscb_base)
+ return -EADDRNOTAVAIL;
+ cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
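+ /* DCS_CFG_R[19:16] and [23:20] hold the CPU count of clusters 0 and 1; turn each count n into an n-bit "all CPUs" mask. */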
+ dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
+ dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
+
+ ret = mcpm_platform_register(&dcscb_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(dcscb_power_up_setup);
+ if (ret) {
+ iounmap(dcscb_base);
+ return ret;
+ }
+
+ pr_info("VExpress DCSCB support installed\n");
+
+ /*
+ * Future entries into the kernel can now go
+ * through the cluster entry vectors.
+ */
+ vexpress_flags_set(virt_to_phys(mcpm_entry_point));
+
+ return 0;
+}
+
+early_initcall(dcscb_init);
diff --git a/arch/arm/mach-vexpress/dcscb_setup.S b/arch/arm/mach-vexpress/dcscb_setup.S
new file mode 100644
index 000000000..4bb7fbe0f
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb_setup.S
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/mach-vexpress/dcscb_setup.S
+ *
+ * Created by: Dave Martin, 2012-06-22
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+
+ENTRY(dcscb_power_up_setup)
+
+ cmp r0, #0 @ check affinity level
+ beq 2f
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ *
+ * A15/A7 may not require explicit L2 invalidation on reset, depending
+ * on hardware integration decisions.
+ * For now, this code assumes that L2 is either already invalidated,
+ * or invalidation is not required.
+ */
+
+ b cci_enable_port_for_self
+
+2: @ Implementation-specific local CPU setup operations should go here,
+ @ if any. In this case, there is nothing to do.
+
+ bx lr
+
+ENDPROC(dcscb_power_up_setup)
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
new file mode 100644
index 000000000..f0ce6b8f5
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -0,0 +1,106 @@
+/*
+ * linux/arch/arm/mach-vexpress/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cp15.h>
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
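+ /* ICIALLU: invalidate all I-caches, then CP15 DSB */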
+ "mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %3\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
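+ /* Re-enable the D-cache (SCTLR.C) and SMP coherency (ACTLR bit 6) */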
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ wfi();
+
+ if (pen_release == cpu_logical_map(cpu)) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here means that we have come out of WFI without
+ * having been woken up - this shouldn't happen.
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref vexpress_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
new file mode 100644
index 000000000..83188cf18
--- /dev/null
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -0,0 +1,74 @@
+/*
+ * linux/arch/arm/mach-vexpress/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/vexpress.h>
+
+#include <asm/mcpm.h>
+#include <asm/smp_scu.h>
+#include <asm/mach/map.h>
+
+#include <plat/platsmp.h>
+
+#include "core.h"
+
+bool __init vexpress_smp_init_ops(void)
+{
+#ifdef CONFIG_MCPM
+ /*
+ * The best way to detect a multi-cluster configuration at the moment
+ * is to look for the presence of a CCI in the system.
+ * Override the default vexpress_smp_ops if so.
+ */
+ struct device_node *node;
+ node = of_find_compatible_node(NULL, NULL, "arm,cci-400");
+ if (node && of_device_is_available(node)) {
+ mcpm_smp_set_ops();
+ return true;
+ }
+#endif
+ return false;
+}
+
+static const struct of_device_id vexpress_smp_dt_scu_match[] __initconst = {
+ { .compatible = "arm,cortex-a5-scu", },
+ { .compatible = "arm,cortex-a9-scu", },
+ {}
+};
+
+static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus)
+{
+ struct device_node *scu = of_find_matching_node(NULL,
+ vexpress_smp_dt_scu_match);
+
+ if (scu)
+ scu_enable(of_iomap(scu, 0));
+
+ /*
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
+ */
+ vexpress_flags_set(virt_to_phys(versatile_secondary_startup));
+}
+
+struct smp_operations __initdata vexpress_smp_dt_ops = {
+ .smp_prepare_cpus = vexpress_smp_dt_prepare_cpus,
+ .smp_secondary_init = versatile_secondary_init,
+ .smp_boot_secondary = versatile_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = vexpress_cpu_die,
+#endif
+};
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
new file mode 100644
index 000000000..f61158c6c
--- /dev/null
+++ b/arch/arm/mach-vexpress/spc.c
@@ -0,0 +1,592 @@
+/*
+ * Versatile Express Serial Power Controller (SPC) support
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ * Achin Gupta <achin.gupta@arm.com>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+
+#include <asm/cacheflush.h>
+
+#define SPCLOG "vexpress-spc: "
+
+#define PERF_LVL_A15 0x00
+#define PERF_REQ_A15 0x04
+#define PERF_LVL_A7 0x08
+#define PERF_REQ_A7 0x0c
+#define COMMS 0x10
+#define COMMS_REQ 0x14
+#define PWC_STATUS 0x18
+#define PWC_FLAG 0x1c
+
+/* SPC wake-up IRQs status and mask */
+#define WAKE_INT_MASK 0x24
+#define WAKE_INT_RAW 0x28
+#define WAKE_INT_STAT 0x2c
+/* SPC power down registers */
+#define A15_PWRDN_EN 0x30
+#define A7_PWRDN_EN 0x34
+/* SPC per-CPU mailboxes */
+#define A15_BX_ADDR0 0x68
+#define A7_BX_ADDR0 0x78
+
+/* SPC CPU/cluster reset status */
+#define STANDBYWFI_STAT 0x3c
+#define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu))
+#define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu)))
+
+/* SPC system config interface registers */
+#define SYSCFG_WDATA 0x70
+#define SYSCFG_RDATA 0x74
+
+/* A15/A7 OPP virtual register base */
+#define A15_PERFVAL_BASE 0xC10
+#define A7_PERFVAL_BASE 0xC30
+
+/* Config interface control bits */
+#define SYSCFG_START (1 << 31)
+#define SYSCFG_SCC (6 << 20)
+#define SYSCFG_STAT (14 << 20)
+
+/* wake-up interrupt masks */
+#define GBL_WAKEUP_INT_MSK (0x3 << 10)
+
+/* TC2 static dual-cluster configuration */
+#define MAX_CLUSTERS 2
+
+/*
+ * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
+ * operation, the operation could start just before the jiffy is about
+ * to be incremented, so set the timeout to 20 ms (i.e. 2 jiffies at 100 Hz).
+ */
+#define TIMEOUT_US 20000
+
+#define MAX_OPPS 8
+#define CA15_DVFS 0
+#define CA7_DVFS 1
+#define SPC_SYS_CFG 2
+#define STAT_COMPLETE(type) ((1 << 0) << (type << 2))
+#define STAT_ERR(type) ((1 << 1) << (type << 2))
+#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type))
+
+struct ve_spc_opp {
+ unsigned long freq;
+ unsigned long u_volt;
+};
+
+struct ve_spc_drvdata {
+ void __iomem *baseaddr;
+ /*
+ * A15 cluster identifier
+ * It corresponds to the A15 processors' MPIDR[15:8] bitfield
+ */
+ u32 a15_clusid;
+ uint32_t cur_rsp_mask;
+ uint32_t cur_rsp_stat;
+ struct semaphore sem;
+ struct completion done;
+ struct ve_spc_opp *opps[MAX_CLUSTERS];
+ int num_opps[MAX_CLUSTERS];
+};
+
+static struct ve_spc_drvdata *info;
+
+static inline bool cluster_is_a15(u32 cluster)
+{
+ return cluster == info->a15_clusid;
+}
+
+/**
+ * ve_spc_global_wakeup_irq()
+ *
+ * Function to set/clear global wakeup IRQs. Not protected by locking since
+ * it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
+ *
+ * @set: if true, global wake-up IRQs are set, if false they are cleared
+ */
+void ve_spc_global_wakeup_irq(bool set)
+{
+ u32 reg;
+
+ reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
+
+ if (set)
+ reg |= GBL_WAKEUP_INT_MSK;
+ else
+ reg &= ~GBL_WAKEUP_INT_MSK;
+
+ writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
+}
+
+/**
+ * ve_spc_cpu_wakeup_irq()
+ *
+ * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
+ * it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @cpu: mpidr[7:0] bitfield describing cpu affinity level
+ * @set: if true, wake-up IRQs are set, if false they are cleared
+ */
+void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
+{
+ u32 mask, reg;
+
+ if (cluster >= MAX_CLUSTERS)
+ return;
+
+ mask = 1 << cpu;
+
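+ /* A7 per-CPU wake-up IRQ bits sit 4 bits above the A15 ones in WAKE_INT_MASK */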
+ if (!cluster_is_a15(cluster))
+ mask <<= 4;
+
+ reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
+
+ if (set)
+ reg |= mask;
+ else
+ reg &= ~mask;
+
+ writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
+}
+
+/**
+ * ve_spc_set_resume_addr() - set the jump address used for warm boot
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @cpu: mpidr[7:0] bitfield describing cpu affinity level
+ * @addr: physical resume address
+ */
+void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
+{
+ void __iomem *baseaddr;
+
+ if (cluster >= MAX_CLUSTERS)
+ return;
+
+ if (cluster_is_a15(cluster))
+ baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
+ else
+ baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
+
+ writel_relaxed(addr, baseaddr);
+}
+
+/**
+ * ve_spc_powerdown()
+ *
+ * Function to enable/disable cluster powerdown. Not protected by locking
+ * since it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @enable: if true enables powerdown, if false disables it
+ */
+void ve_spc_powerdown(u32 cluster, bool enable)
+{
+ u32 pwdrn_reg;
+
+ if (cluster >= MAX_CLUSTERS)
+ return;
+
+ pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
+ writel_relaxed(enable, info->baseaddr + pwdrn_reg);
+}
+
+static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
+{
+ return cluster_is_a15(cluster) ?
+ STANDBYWFI_STAT_A15_CPU_MASK(cpu)
+ : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
+}
+
+/**
+ * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ *
+ * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ *
+ * @return: non-zero if and only if the specified CPU is in WFI
+ *
+ * Take care when interpreting the result of this function: a CPU might
+ * be in WFI temporarily due to idle, and is not necessarily safely
+ * parked.
+ */
+int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+{
+ int ret;
+ u32 mask = standbywfi_cpu_mask(cpu, cluster);
+
+ if (cluster >= MAX_CLUSTERS)
+ return 1;
+
+ ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
+
+ pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
+ __func__, STANDBYWFI_STAT, ret, mask);
+
+ return ret & mask;
+}
+
+static int ve_spc_get_performance(int cluster, u32 *freq)
+{
+ struct ve_spc_opp *opps = info->opps[cluster];
+ u32 perf_cfg_reg = 0;
+ u32 perf;
+
+ perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;
+
+ perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
+ if (perf >= info->num_opps[cluster])
+ return -EINVAL;
+
+ opps += perf;
+ *freq = opps->freq;
+
+ return 0;
+}
+
+/* find closest match to given frequency in OPP table */
+static int ve_spc_round_performance(int cluster, u32 freq)
+{
+ int idx, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+ u32 fmin = 0, fmax = ~0, ftmp;
+
+ freq /= 1000; /* OPP entries in kHz */
+ for (idx = 0; idx < max_opp; idx++, opps++) {
+ ftmp = opps->freq;
+ if (ftmp >= freq) {
+ if (ftmp <= fmax)
+ fmax = ftmp;
+ } else {
+ if (ftmp >= fmin)
+ fmin = ftmp;
+ }
+ }
+ if (fmax != ~0)
+ return fmax * 1000;
+ else
+ return fmin * 1000;
+}
+
+static int ve_spc_find_performance_index(int cluster, u32 freq)
+{
+ int idx, max_opp = info->num_opps[cluster];
+ struct ve_spc_opp *opps = info->opps[cluster];
+
+ for (idx = 0; idx < max_opp; idx++, opps++)
+ if (opps->freq == freq)
+ break;
+ return (idx == max_opp) ? -EINVAL : idx;
+}
+
+static int ve_spc_waitforcompletion(int req_type)
+{
+ int ret = wait_for_completion_interruptible_timeout(
+ &info->done, usecs_to_jiffies(TIMEOUT_US));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
+ return ret;
+}
+
+static int ve_spc_set_performance(int cluster, u32 freq)
+{
+ u32 perf_cfg_reg, perf_stat_reg;
+ int ret, perf, req_type;
+
+ if (cluster_is_a15(cluster)) {
+ req_type = CA15_DVFS;
+ perf_cfg_reg = PERF_LVL_A15;
+ perf_stat_reg = PERF_REQ_A15;
+ } else {
+ req_type = CA7_DVFS;
+ perf_cfg_reg = PERF_LVL_A7;
+ perf_stat_reg = PERF_REQ_A7;
+ }
+
+ perf = ve_spc_find_performance_index(cluster, freq);
+
+ if (perf < 0)
+ return perf;
+
+ if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+ return -ETIME;
+
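+ /*
+ * Arm the completion and the response mask before kicking off the
+ * request: the SPC IRQ handler matches PWC_STATUS against
+ * cur_rsp_mask and completes us once the transaction is done.
+ */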
+ init_completion(&info->done);
+ info->cur_rsp_mask = RESPONSE_MASK(req_type);
+
+ writel(perf, info->baseaddr + perf_cfg_reg);
+ ret = ve_spc_waitforcompletion(req_type);
+
+ info->cur_rsp_mask = 0;
+ up(&info->sem);
+
+ return ret;
+}
+
+static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
+{
+ int ret;
+
+ if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
+ return -ETIME;
+
+ init_completion(&info->done);
+ info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);
+
+ /* Kick off the transaction: start bit, function code and word offset */
+ writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
+ ret = ve_spc_waitforcompletion(SPC_SYS_CFG);
+
+ if (ret == 0)
+ *data = readl(info->baseaddr + SYSCFG_RDATA);
+
+ info->cur_rsp_mask = 0;
+ up(&info->sem);
+
+ return ret;
+}
+
+static irqreturn_t ve_spc_irq_handler(int irq, void *data)
+{
+ struct ve_spc_drvdata *drv_data = data;
+ uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);
+
+ if (info->cur_rsp_mask & status) {
+ info->cur_rsp_stat = status;
+ complete(&drv_data->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * +--------------------------+
+ * | 31 20 | 19 0 |
+ * +--------------------------+
+ * | m_volt | freq(kHz) |
+ * +--------------------------+
+ */
+#define MULT_FACTOR 20
+#define VOLT_SHIFT 20
+#define FREQ_MASK (0xFFFFF)
+static int ve_spc_populate_opps(uint32_t cluster)
+{
+ uint32_t data = 0, off, ret, idx;
+ struct ve_spc_opp *opps;
+
+ opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
+ if (!opps)
+ return -ENOMEM;
+
+ info->opps[cluster] = opps;
+
+ off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
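+ /* Read successive OPP entries until a config read fails; idx then holds the number of valid OPPs. */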
+ for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
+ ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
+ if (!ret) {
+ opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
+ opps->u_volt = (data >> VOLT_SHIFT) * 1000;
+ } else {
+ break;
+ }
+ }
+ info->num_opps[cluster] = idx;
+
+ return ret;
+}
+
+static int ve_init_opp_table(struct device *cpu_dev)
+{
+ int cluster;
+ int idx, ret = 0, max_opp;
+ struct ve_spc_opp *opps;
+
+ cluster = topology_physical_package_id(cpu_dev->id);
+ cluster = cluster < 0 ? 0 : cluster;
+
+ max_opp = info->num_opps[cluster];
+ opps = info->opps[cluster];
+
+ for (idx = 0; idx < max_opp; idx++, opps++) {
+ ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
+ opps->freq, opps->u_volt);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
+{
+ int ret;
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_err(SPCLOG "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ info->baseaddr = baseaddr;
+ info->a15_clusid = a15_clusid;
+
+ if (irq <= 0) {
+ pr_err(SPCLOG "Invalid IRQ %d\n", irq);
+ kfree(info);
+ return -EINVAL;
+ }
+
+ init_completion(&info->done);
+
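+ /* Drain any stale transaction status before hooking up the IRQ (assuming PWC_STATUS is clear-on-read). */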
+ readl_relaxed(info->baseaddr + PWC_STATUS);
+
+ ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
+ | IRQF_ONESHOT, "vexpress-spc", info);
+ if (ret) {
+ pr_err(SPCLOG "IRQ %d request failed\n", irq);
+ kfree(info);
+ return -ENODEV;
+ }
+
+ sema_init(&info->sem, 1);
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure driver info reaches main
+ * memory.
+ */
+ sync_cache_w(info);
+ sync_cache_w(&info);
+
+ return 0;
+}
+
+struct clk_spc {
+ struct clk_hw hw;
+ int cluster;
+};
+
+#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+ u32 freq;
+
+ if (ve_spc_get_performance(spc->cluster, &freq))
+ return -EIO;
+
+ return freq * 1000;
+}
+
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return ve_spc_round_performance(spc->cluster, drate);
+}
+
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_spc *spc = to_clk_spc(hw);
+
+ return ve_spc_set_performance(spc->cluster, rate / 1000);
+}
+
+static struct clk_ops clk_spc_ops = {
+ .recalc_rate = spc_recalc_rate,
+ .round_rate = spc_round_rate,
+ .set_rate = spc_set_rate,
+};
+
+static struct clk *ve_spc_clk_register(struct device *cpu_dev)
+{
+ struct clk_init_data init;
+ struct clk_spc *spc;
+
+ spc = kzalloc(sizeof(*spc), GFP_KERNEL);
+ if (!spc) {
+ pr_err("could not allocate spc clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spc->hw.init = &init;
+ spc->cluster = topology_physical_package_id(cpu_dev->id);
+
+ spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;
+
+ init.name = dev_name(cpu_dev);
+ init.ops = &clk_spc_ops;
+ init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+
+ return devm_clk_register(cpu_dev, &spc->hw);
+}
+
+static int __init ve_spc_clk_init(void)
+{
+ int cpu;
+ struct clk *clk;
+
+ if (!info)
+ return 0; /* Continue only if SPC is initialised */
+
+ if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
+ pr_err("failed to build OPP table\n");
+ return -ENODEV;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_warn("failed to get cpu%d device\n", cpu);
+ continue;
+ }
+ clk = ve_spc_clk_register(cpu_dev);
+ if (IS_ERR(clk)) {
+ pr_warn("failed to register cpu%d clock\n", cpu);
+ continue;
+ }
+ if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+ pr_warn("failed to register cpu%d clock lookup\n", cpu);
+ continue;
+ }
+
+ if (ve_init_opp_table(cpu_dev))
+ pr_warn("failed to initialise cpu%d opp table\n", cpu);
+ }
+
+ platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ve_spc_clk_init);
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
new file mode 100644
index 000000000..793d06524
--- /dev/null
+++ b/arch/arm/mach-vexpress/spc.h
@@ -0,0 +1,25 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+
+#ifndef __SPC_H_
+#define __SPC_H_
+
+int __init ve_spc_init(void __iomem *base, u32 a15_clusid, int irq);
+void ve_spc_global_wakeup_irq(bool set);
+void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
+void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
+void ve_spc_powerdown(u32 cluster, bool enable);
+int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
+
+#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
new file mode 100644
index 000000000..b3328cd46
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -0,0 +1,266 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
+ *
+ * Created by: Nicolas Pitre, October 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * Some portions of this file were originally written by Achin Gupta
+ * Copyright: (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/errno.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+
+#include <linux/arm-cci.h>
+
+#include "spc.h"
+
+/* SCC conf registers */
+#define RESET_CTRL 0x018
+#define RESET_A15_NCORERESET(cpu) (1 << (2 + (cpu)))
+#define RESET_A7_NCORERESET(cpu) (1 << (16 + (cpu)))
+
+#define A15_CONF 0x400
+#define A7_CONF 0x500
+#define SYS_INFO 0x700
+#define SPC_BASE 0xb00
+
+static void __iomem *scc;
+
+#define TC2_CLUSTERS 2
+#define TC2_MAX_CPUS_PER_CLUSTER 3
+
+static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
+
+static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
+{
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
+ return -EINVAL;
+ ve_spc_set_resume_addr(cluster, cpu,
+ virt_to_phys(mcpm_entry_point));
+ ve_spc_cpu_wakeup_irq(cluster, cpu, true);
+ return 0;
+}
+
+static int tc2_pm_cluster_powerup(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= TC2_CLUSTERS)
+ return -EINVAL;
+ ve_spc_powerdown(cluster, false);
+ return 0;
+}
+
+static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
+{
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
+ ve_spc_cpu_wakeup_irq(cluster, cpu, true);
+ /*
+ * If the CPU is committed to power down, make sure
+ * the power controller will be in charge of waking it
+ * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU
+ * interface to the CPU by disabling the GIC CPU interface,
+ * to prevent wfi from completing execution behind the
+ * power controller's back.
+ */
+ gic_cpu_if_down();
+}
+
+static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS);
+ ve_spc_powerdown(cluster, true);
+ ve_spc_global_wakeup_irq(true);
+}
+
+static void tc2_pm_cpu_cache_disable(void)
+{
+ v7_exit_coherency_flush(louis);
+}
+
+static void tc2_pm_cluster_cache_disable(void)
+{
+ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+ "isb \n\t"
+ "dsb "
+ : : "r" (0x400) );
+ }
+
+ v7_exit_coherency_flush(all);
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
+}
+
+static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
+{
+ u32 mask = cluster ?
+ RESET_A7_NCORERESET(cpu)
+ : RESET_A15_NCORERESET(cpu);
+
+ return !(readl_relaxed(scc + RESET_CTRL) & mask);
+}
+
+#define POLL_MSEC 10
+#define TIMEOUT_MSEC 1000
+
+static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
+{
+ unsigned tries;
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
+
+ for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
+ pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
+ __func__, cpu, cluster,
+ readl_relaxed(scc + RESET_CTRL));
+
+ /*
+ * We need the CPU to reach WFI, but the power
+ * controller may put the cluster in reset and
+ * power it off as soon as that happens, before
+ * we have a chance to see STANDBYWFI.
+ *
+ * So we need to check for both conditions:
+ */
+ if (tc2_core_in_reset(cpu, cluster) ||
+ ve_spc_cpu_in_wfi(cpu, cluster))
+ return 0; /* success: the CPU is halted */
+
+ /* Otherwise, wait and retry: */
+ msleep(POLL_MSEC);
+ }
+
+ return -ETIMEDOUT; /* timeout */
+}
+
+static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
+{
+ ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
+}
+
+static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
+{
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
+ ve_spc_cpu_wakeup_irq(cluster, cpu, false);
+ ve_spc_set_resume_addr(cluster, cpu, 0);
+}
+
+static void tc2_pm_cluster_is_up(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= TC2_CLUSTERS);
+ ve_spc_powerdown(cluster, false);
+ ve_spc_global_wakeup_irq(false);
+}
+
+static const struct mcpm_platform_ops tc2_pm_power_ops = {
+ .cpu_powerup = tc2_pm_cpu_powerup,
+ .cluster_powerup = tc2_pm_cluster_powerup,
+ .cpu_suspend_prepare = tc2_pm_cpu_suspend_prepare,
+ .cpu_powerdown_prepare = tc2_pm_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
+ .cpu_cache_disable = tc2_pm_cpu_cache_disable,
+ .cluster_cache_disable = tc2_pm_cluster_cache_disable,
+ .wait_for_powerdown = tc2_pm_wait_for_powerdown,
+ .cpu_is_up = tc2_pm_cpu_is_up,
+ .cluster_is_up = tc2_pm_cluster_is_up,
+};
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ */
+static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
+{
+ asm volatile (" \n"
+" cmp r0, #1 \n"
+" bxne lr \n"
+" b cci_enable_port_for_self ");
+}
+
+static int __init tc2_pm_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+ int ret, irq;
+ u32 a15_cluster_id, a7_cluster_id, sys_info;
+ struct device_node *np;
+
+ /*
+ * The power management-related features are hidden behind
+ * SCC registers. We need to extract runtime information like
+ * cluster ids and number of CPUs really available in clusters.
+ */
+ np = of_find_compatible_node(NULL, NULL,
+ "arm,vexpress-scc,v2p-ca15_a7");
+ scc = of_iomap(np, 0);
+ if (!scc)
+ return -ENODEV;
+
+ a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
+ a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
+ if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
+ return -EINVAL;
+
+ sys_info = readl_relaxed(scc + SYS_INFO);
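+ /* SYS_INFO[19:16] and [23:20] hold the number of CPUs fitted in the A15 and A7 clusters respectively. */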
+ tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
+ tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
+
+ irq = irq_of_parse_and_map(np, 0);
+
+ /*
+ * A subset of the SCC registers is also used to communicate
+ * with the SPC (power controller). We need to be able to
+ * drive it very early in the boot process to power up
+ * processors, so we initialize the SPC driver here.
+ */
+ ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
+ if (ret)
+ return ret;
+
+ if (!cci_probed())
+ return -ENODEV;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
+ pr_err("%s: boot CPU is out of bound!\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = mcpm_platform_register(&tc2_pm_power_ops);
+ if (!ret) {
+ mcpm_sync_init(tc2_pm_power_up_setup);
+ /* test if we can (re)enable the CCI on our own */
+ BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
+ pr_info("TC2 power management initialized\n");
+ }
+ return ret;
+}
+
+early_initcall(tc2_pm_init);
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
new file mode 100644
index 000000000..a0400f4cc
--- /dev/null
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -0,0 +1,16 @@
+#include <asm/mach/arch.h>
+
+#include "core.h"
+
+static const char * const v2m_dt_match[] __initconst = {
+ "arm,vexpress",
+ NULL,
+};
+
+DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
+ .dt_compat = v2m_dt_match,
+ .l2c_aux_val = 0x00400000,
+ .l2c_aux_mask = 0xfe0fffff,
+ .smp = smp_ops(vexpress_smp_dt_ops),
+ .smp_init = smp_init_ops(vexpress_smp_init_ops),
+MACHINE_END