author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-09-08 01:01:14 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-09-08 01:01:14 -0300
commit     e5fd91f1ef340da553f7a79da9540c3db711c937 (patch)
tree       b11842027dc6641da63f4bcc524f8678263304a3 /arch/arm/common
parent     2a9b0348e685a63d97486f6749622b61e9e3292f (diff)
Linux-libre 4.2-gnu
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/Makefile      |    1
-rw-r--r--  arch/arm/common/edma.c        |    3
-rw-r--r--  arch/arm/common/mcpm_entry.c  |  281
-rw-r--r--  arch/arm/common/mcpm_head.S   |    2
-rw-r--r--  arch/arm/common/sa1111.c      |    7
-rw-r--r--  arch/arm/common/timer-sp.c    |  304
6 files changed, 133 insertions, 465 deletions
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 70b1eff47..6ee5959a8 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_SHARP_LOCOMO)	+= locomo.o
 obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
-obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
 obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o	= -pg
 AFLAGS_mcpm_head.o		:= -march=armv7-a
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 5662a8726..873dbfcc7 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1350,6 +1350,9 @@ void edma_stop(unsigned channel)
 		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
 		edma_write_array(ctlr, EDMA_EMCR, j, mask);
 
+		/* clear possibly pending completion interrupt */
+		edma_shadow0_write_array(ctlr, SH_ICR, j, mask);
+
 		pr_debug("EDMA: EER%d %08x\n", j,
 			 edma_shadow0_read_array(ctlr, SH_EER, j));
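The edma.c hunk above closes a stale-interrupt hazard: edma_stop() disabled and cleared the channel's events but left a latched completion interrupt pending, which could be delivered later, after the channel is reused. The general shape of the fix, sketched below in plain C with hypothetical register offsets (this is an illustration, not the real EDMA register map), is to acknowledge interrupt status as the final step of stopping a channel:

#include <stdint.h>

/* Hypothetical register offsets, for illustration only. */
#define CH_EECR	0x00	/* event enable clear: stop servicing new events */
#define CH_ECR	0x04	/* event clear: drop a pending event */
#define CH_SECR	0x08	/* secondary event clear */
#define CH_EMCR	0x0c	/* event missed clear */
#define CH_ICR	0x10	/* interrupt clear: ack a latched completion */

static inline void reg_write(volatile uint32_t *regs, uint32_t off, uint32_t val)
{
	regs[off / sizeof(uint32_t)] = val;
}

/* Stop a channel so that no stale completion interrupt can fire later. */
static void channel_stop(volatile uint32_t *regs, uint32_t mask)
{
	reg_write(regs, CH_EECR, mask);	/* 1. no new events are serviced */
	reg_write(regs, CH_ECR,  mask);	/* 2. clear any pending event */
	reg_write(regs, CH_SECR, mask);	/* 3. clear secondary event state */
	reg_write(regs, CH_EMCR, mask);	/* 4. clear missed-event state */
	reg_write(regs, CH_ICR,  mask);	/* 5. ack the latched completion IRQ */
}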
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 5f8a52ac7..a923524d1 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -20,6 +20,126 @@
 #include <asm/cputype.h>
 #include <asm/suspend.h>
 
+/*
+ * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
+ * For a comprehensive description of the main algorithm used here, please
+ * see Documentation/arm/cluster-pm-race-avoidance.txt.
+ */
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ *    This must be called at the point of committing to teardown of a CPU.
+ *    The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ *    To avoid deadlocks, this must be called before a CPU is powered down.
+ *    The CPU cache (SCTRL.C bit) is expected to be off.
+ *    However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
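The block just added above implements the last-man election from Documentation/arm/cluster-pm-race-avoidance.txt: the last CPU going down may tear the cluster down only if every peer has reached CPU_DOWN and no inbound CPU has raced in. Purely for illustration (not part of this commit), here is a simplified single-threaded C model of the decision __mcpm_outbound_enter_critical() makes; it drops the cache maintenance (sync_cache_r/w) and the wfe()/sev() signalling that the real SMP code requires:

#include <stdbool.h>

enum cpu_state     { CPU_UP, CPU_GOING_DOWN, CPU_DOWN };
enum cluster_state { CLUSTER_UP, CLUSTER_GOING_DOWN, CLUSTER_DOWN };
enum inbound_state { INBOUND_NOT_COMING_UP, INBOUND_COMING_UP };

#define MAX_CPUS 4

struct cluster_sync {
	enum cpu_state     cpu[MAX_CPUS];
	enum cluster_state cluster;
	enum inbound_state inbound;
};

/* May the calling CPU ("self"), as last man, tear the cluster down? */
static bool outbound_enter_critical(struct cluster_sync *c, int self)
{
	c->cluster = CLUSTER_GOING_DOWN;	/* warn inbound CPUs */

	if (c->inbound == INBOUND_COMING_UP)	/* an inbound CPU won */
		goto abort;

	for (int i = 0; i < MAX_CPUS; i++) {
		if (i == self)
			continue;
		/* the real code spins with wfe() while a CPU is GOING_DOWN */
		if (c->cpu[i] != CPU_DOWN)
			goto abort;
	}
	return true;				/* safe to tear down */

abort:
	c->cluster = CLUSTER_UP;		/* restore and back out */
	return false;
}

The real function must also re-read each peer's state from memory on every iteration, which is exactly what its sync_cache_r() calls guarantee.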
@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	bool cpu_is_down, cluster_is_down;
 	int ret = 0;
 
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (!platform_ops)
 		return -EUNATCH; /* try not to shadow power_up errors */
 	might_sleep();
 
-	/* backward compatibility callback */
-	if (platform_ops->power_up)
-		return platform_ops->power_up(cpu, cluster);
-
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
 	/*
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
 	bool cpu_going_down, last_man;
 	phys_reset_t phys_reset;
 
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
 	if (WARN_ON_ONCE(!platform_ops))
 		return;
 	BUG_ON(!irqs_disabled());
 
-	/*
-	 * Do this before calling into the power_down method,
-	 * as it might not always be safe to do afterwards.
-	 */
 	setup_mm_for_reboot();
 
-	/* backward compatibility callback */
-	if (platform_ops->power_down) {
-		platform_ops->power_down();
-		goto not_dead;
-	}
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
 	__mcpm_cpu_going_down(cpu, cluster);
-
 	arch_spin_lock(&mcpm_lock);
 	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
 	if (cpu_going_down)
 		wfi();
 
-not_dead:
 	/*
 	 * It is possible for a power_up request to happen concurrently
 	 * with a power_down request for the same CPU. In this case the
@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 	return ret;
 }
 
-void mcpm_cpu_suspend(u64 expected_residency)
+void mcpm_cpu_suspend(void)
 {
 	if (WARN_ON_ONCE(!platform_ops))
 		return;
 
-	/* backward compatibility callback */
-	if (platform_ops->suspend) {
-		phys_reset_t phys_reset;
-		BUG_ON(!irqs_disabled());
-		setup_mm_for_reboot();
-		platform_ops->suspend(expected_residency);
-		phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-		phys_reset(virt_to_phys(mcpm_entry_point));
-		BUG();
-	}
-
 	/* Some platforms might have to enable special resume modes, etc. */
 	if (platform_ops->cpu_suspend_prepare) {
 		unsigned int mpidr = read_cpuid_mpidr();
@@ -256,12 +347,6 @@ int mcpm_cpu_powered_up(void)
 	if (!platform_ops)
 		return -EUNATCH;
 
-	/* backward compatibility callback */
-	if (platform_ops->powered_up) {
-		platform_ops->powered_up();
-		return 0;
-	}
-
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
@@ -334,120 +419,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
 
 #endif
 
-struct sync_struct mcpm_sync;
-
-/*
- * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
- *    This must be called at the point of committing to teardown of a CPU.
- *    The CPU cache (SCTRL.C bit) is expected to still be active.
- */
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
-{
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-}
-
-/*
- * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
- *    cluster can be torn down without disrupting this CPU.
- *    To avoid deadlocks, this must be called before a CPU is powered down.
- *    The CPU cache (SCTRL.C bit) is expected to be off.
- *    However L2 cache might or might not be active.
- */
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-	sev();
-}
-
-/*
- * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
- * @state: the final state of the cluster:
- *     CLUSTER_UP: no destructive teardown was done and the cluster has been
- *         restored to the previous state (CPU cache still active); or
- *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
- *         (CPU cache disabled, L2 cache either enabled or disabled).
- */
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cluster = state;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
-	sev();
-}
-
-/*
- * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
- * This function should be called by the last man, after local CPU teardown
- * is complete.  CPU cache expected to be active.
- *
- * Returns:
- *     false: the critical section was not entered because an inbound CPU was
- *         observed, or the cluster is already being set up;
- *     true: the critical section was entered: it is now safe to tear down the
- *         cluster.
- */
-bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int i;
-	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
-
-	/* Warn inbound CPUs that the cluster is being torn down: */
-	c->cluster = CLUSTER_GOING_DOWN;
-	sync_cache_w(&c->cluster);
-
-	/* Back out if the inbound cluster is already in the critical region: */
-	sync_cache_r(&c->inbound);
-	if (c->inbound == INBOUND_COMING_UP)
-		goto abort;
-
-	/*
-	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
-	 * teardown is complete on each CPU before tearing down the cluster.
-	 *
-	 * If any CPU has been woken up again from the DOWN state, then we
-	 * shouldn't be taking the cluster down at all: abort in that case.
-	 */
-	sync_cache_r(&c->cpus);
-	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
-		int cpustate;
-
-		if (i == cpu)
-			continue;
-
-		while (1) {
-			cpustate = c->cpus[i].cpu;
-			if (cpustate != CPU_GOING_DOWN)
-				break;
-
-			wfe();
-			sync_cache_r(&c->cpus[i].cpu);
-		}
-
-		switch (cpustate) {
-		case CPU_DOWN:
-			continue;
-
-		default:
-			goto abort;
-		}
-	}
-
-	return true;
-
-abort:
-	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
-	return false;
-}
-
-int __mcpm_cluster_state(unsigned int cluster)
-{
-	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
-	return mcpm_sync.clusters[cluster].cluster;
-}
-
 extern unsigned long mcpm_power_up_setup_phys;
 
 int __init mcpm_sync_init(
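With the backward-compatibility callbacks (power_up, power_down, suspend, powered_up) removed above, a platform backend must supply the finer-grained hooks this file now calls unconditionally. A hedged skeleton of such a registration follows; the field names mirror the callbacks referenced in the diff (cpu_suspend_prepare and friends), but anything not shown there is an assumption and should be checked against arch/arm/include/asm/mcpm.h in this tree:

#include <linux/init.h>
#include <asm/mcpm.h>

static int myplat_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	/* assert power/reset controls for one CPU: platform specific */
	return 0;
}

static void myplat_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	/* arm the power controller to cut this CPU once it executes WFI */
}

static void myplat_cpu_cache_disable(void)
{
	/* flush and disable the local CPU cache (SCTLR.C) */
}

static const struct mcpm_platform_ops myplat_ops = {
	.cpu_powerup		= myplat_cpu_powerup,	/* hypothetical backend */
	.cpu_powerdown_prepare	= myplat_cpu_powerdown_prepare,
	.cpu_cache_disable	= myplat_cpu_cache_disable,
	/* cluster_* and suspend hooks omitted for brevity */
};

static int __init myplat_mcpm_init(void)
{
	return mcpm_platform_register(&myplat_ops);
}
early_initcall(myplat_mcpm_init);

Registration typically happens from an early initcall so that MCPM is in place before secondary CPUs are brought up.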
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index e02db4b81..08b3bb9bc 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -49,7 +49,7 @@ ENTRY(mcpm_entry_point)
 
 ARM_BE8(setend	be)
- THUMB(	adr	r12, BSYM(1f)	)
+ THUMB(	badr	r12, 1f		)
 THUMB(	bx	r12	)
 THUMB(	.thumb	)
1:
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 5cc779c8e..93ee70dbb 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -501,8 +501,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
 	 * Register SA1111 interrupt
 	 */
 	irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
-	irq_set_handler_data(sachip->irq, sachip);
-	irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
+	irq_set_chained_handler_and_data(sachip->irq, sa1111_irq_handler,
+					 sachip);
 
 	dev_info(sachip->dev, "Providing IRQ%u-%u\n",
 		 sachip->irq_base, sachip->irq_base + SA1111_IRQ_NR - 1);
@@ -836,8 +836,7 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	clk_unprepare(sachip->clk);
 
 	if (sachip->irq != NO_IRQ) {
-		irq_set_chained_handler(sachip->irq, NULL);
-		irq_set_handler_data(sachip->irq, NULL);
+		irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
 		irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
 
 		release_mem_region(sachip->phys + SA1111_INTC, 512);
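The sa1111.c hunks replace the separate handler/data calls with irq_set_chained_handler_and_data(), which updates both fields under the IRQ descriptor lock. With two separate calls, the parent interrupt can fire in between and run the chained handler against stale (or NULL) handler data. A sketch of the pattern for a hypothetical demux driver, using the chained-handler prototype of kernels from this era:

#include <linux/io.h>
#include <linux/irq.h>

struct mydev {
	void __iomem *base;	/* interrupt status registers live here */
};

/* Chained flow handler, hypothetical: demultiplexes child interrupts. */
static void mydev_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct mydev *mydev = irq_desc_get_handler_data(desc);

	/* read status from mydev->base and generic_handle_irq() each child */
}

static void mydev_setup_irq(struct mydev *mydev, unsigned int parent_irq)
{
	/*
	 * Racy two-step variant being removed by the patch above; between
	 * the calls the parent IRQ can fire with the wrong handler data:
	 *
	 *	irq_set_handler_data(parent_irq, mydev);
	 *	irq_set_chained_handler(parent_irq, mydev_demux_handler);
	 *
	 * Atomic replacement:
	 */
	irq_set_chained_handler_and_data(parent_irq, mydev_demux_handler,
					 mydev);
}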
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
deleted file mode 100644
index 192113247..000000000
--- a/arch/arm/common/timer-sp.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- *  linux/arch/arm/common/timer-sp.c
- *
- *  Copyright (C) 1999 - 2003 ARM Limited
- *  Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/clk.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-#include <asm/hardware/arm_timer.h>
-#include <asm/hardware/timer-sp.h>
-
-static long __init sp804_get_clock_rate(struct clk *clk)
-{
-	long rate;
-	int err;
-
-	err = clk_prepare(clk);
-	if (err) {
-		pr_err("sp804: clock failed to prepare: %d\n", err);
-		clk_put(clk);
-		return err;
-	}
-
-	err = clk_enable(clk);
-	if (err) {
-		pr_err("sp804: clock failed to enable: %d\n", err);
-		clk_unprepare(clk);
-		clk_put(clk);
-		return err;
-	}
-
-	rate = clk_get_rate(clk);
-	if (rate < 0) {
-		pr_err("sp804: clock failed to get rate: %ld\n", rate);
-		clk_disable(clk);
-		clk_unprepare(clk);
-		clk_put(clk);
-	}
-
-	return rate;
-}
-
-static void __iomem *sched_clock_base;
-
-static u64 notrace sp804_read(void)
-{
-	return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
-}
-
-void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
-						     const char *name,
-						     struct clk *clk,
-						     int use_sched_clock)
-{
-	long rate;
-
-	if (!clk) {
-		clk = clk_get_sys("sp804", name);
-		if (IS_ERR(clk)) {
-			pr_err("sp804: clock not found: %d\n",
-			       (int)PTR_ERR(clk));
-			return;
-		}
-	}
-
-	rate = sp804_get_clock_rate(clk);
-
-	if (rate < 0)
-		return;
-
-	/* setup timer 0 as free-running clocksource */
-	writel(0, base + TIMER_CTRL);
-	writel(0xffffffff, base + TIMER_LOAD);
-	writel(0xffffffff, base + TIMER_VALUE);
-	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
-		base + TIMER_CTRL);
-
-	clocksource_mmio_init(base + TIMER_VALUE, name,
-		rate, 200, 32, clocksource_mmio_readl_down);
-
-	if (use_sched_clock) {
-		sched_clock_base = base;
-		sched_clock_register(sp804_read, 32, rate);
-	}
-}
-
-
-static void __iomem *clkevt_base;
-static unsigned long clkevt_reload;
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = dev_id;
-
-	/* clear the interrupt */
-	writel(1, clkevt_base + TIMER_INTCLR);
-
-	evt->event_handler(evt);
-
-	return IRQ_HANDLED;
-}
-
-static void sp804_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *evt)
-{
-	unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
-
-	writel(ctrl, clkevt_base + TIMER_CTRL);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		writel(clkevt_reload, clkevt_base + TIMER_LOAD);
-		ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* period set, and timer enabled in 'next_event' hook */
-		ctrl |= TIMER_CTRL_ONESHOT;
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		break;
-	}
-
-	writel(ctrl, clkevt_base + TIMER_CTRL);
-}
-
-static int sp804_set_next_event(unsigned long next,
-	struct clock_event_device *evt)
-{
-	unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
-
-	writel(next, clkevt_base + TIMER_LOAD);
-	writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
-
-	return 0;
-}
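The deleted sp804_set_mode() above uses the legacy enum clock_event_mode callback, which the clockevents core was phasing out around this release in favour of per-state callbacks; the SP804 driver itself was not dropped from the kernel, it moved under drivers/clocksource/. As a hedged sketch (illustrative names, reusing the deleted file's clkevt_base, clkevt_reload and TIMER_* definitions), the same register protocol maps onto set_state_* handlers like this:

static int sp804_shutdown(struct clock_event_device *evt)
{
	/* keep 32-bit mode and IRQ enable configured, timer stopped */
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_IE, clkevt_base + TIMER_CTRL);
	return 0;
}

static int sp804_set_periodic(struct clock_event_device *evt)
{
	unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
			     TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;

	writel(clkevt_reload, clkevt_base + TIMER_LOAD);	/* rate / HZ */
	writel(ctrl, clkevt_base + TIMER_CTRL);
	return 0;
}

static int sp804_set_oneshot(struct clock_event_device *evt)
{
	/* period programmed and timer enabled in the set_next_event hook */
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_IE | TIMER_CTRL_ONESHOT,
	       clkevt_base + TIMER_CTRL);
	return 0;
}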
-
-static struct clock_event_device sp804_clockevent = {
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-			  CLOCK_EVT_FEAT_DYNIRQ,
-	.set_mode	= sp804_set_mode,
-	.set_next_event	= sp804_set_next_event,
-	.rating		= 300,
-};
-
-static struct irqaction sp804_timer_irq = {
-	.name		= "timer",
-	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
-	.handler	= sp804_timer_interrupt,
-	.dev_id		= &sp804_clockevent,
-};
-
-void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
-{
-	struct clock_event_device *evt = &sp804_clockevent;
-	long rate;
-
-	if (!clk)
-		clk = clk_get_sys("sp804", name);
-	if (IS_ERR(clk)) {
-		pr_err("sp804: %s clock not found: %d\n", name,
-			(int)PTR_ERR(clk));
-		return;
-	}
-
-	rate = sp804_get_clock_rate(clk);
-	if (rate < 0)
-		return;
-
-	clkevt_base = base;
-	clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
-	evt->name = name;
-	evt->irq = irq;
-	evt->cpumask = cpu_possible_mask;
-
-	writel(0, base + TIMER_CTRL);
-
-	setup_irq(irq, &sp804_timer_irq);
-	clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
-}
-
-static void __init sp804_of_init(struct device_node *np)
-{
-	static bool initialized = false;
-	void __iomem *base;
-	int irq;
-	u32 irq_num = 0;
-	struct clk *clk1, *clk2;
-	const char *name = of_get_property(np, "compatible", NULL);
-
-	base = of_iomap(np, 0);
-	if (WARN_ON(!base))
-		return;
-
-	/* Ensure timers are disabled */
-	writel(0, base + TIMER_CTRL);
-	writel(0, base + TIMER_2_BASE + TIMER_CTRL);
-
-	if (initialized || !of_device_is_available(np))
-		goto err;
-
-	clk1 = of_clk_get(np, 0);
-	if (IS_ERR(clk1))
-		clk1 = NULL;
-
-	/* Get the 2nd clock if the timer has 3 timer clocks */
-	if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) {
-		clk2 = of_clk_get(np, 1);
-		if (IS_ERR(clk2)) {
-			pr_err("sp804: %s clock not found: %d\n", np->name,
-				(int)PTR_ERR(clk2));
-			clk2 = NULL;
-		}
-	} else
-		clk2 = clk1;
-
-	irq = irq_of_parse_and_map(np, 0);
-	if (irq <= 0)
-		goto err;
-
-	of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
-	if (irq_num == 2) {
-		__sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
-		__sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
-	} else {
-		__sp804_clockevents_init(base, irq, clk1 , name);
-		__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
-							 name, clk2, 1);
-	}
-	initialized = true;
-
-	return;
-err:
-	iounmap(base);
-}
-CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
-
-static void __init integrator_cp_of_init(struct device_node *np)
-{
-	static int init_count = 0;
-	void __iomem *base;
-	int irq;
-	const char *name = of_get_property(np, "compatible", NULL);
-	struct clk *clk;
-
-	base = of_iomap(np, 0);
-	if (WARN_ON(!base))
-		return;
-	clk = of_clk_get(np, 0);
-	if (WARN_ON(IS_ERR(clk)))
-		return;
-
-	/* Ensure timer is disabled */
-	writel(0, base + TIMER_CTRL);
-
-	if (init_count == 2 || !of_device_is_available(np))
-		goto err;
-
-	if (!init_count)
-		__sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
-	else {
-		irq = irq_of_parse_and_map(np, 0);
-		if (irq <= 0)
-			goto err;
-
-		__sp804_clockevents_init(base, irq, clk, name);
-	}
-
-	init_count++;
-	return;
-err:
-	iounmap(base);
-}
-CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
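One detail of the deleted file worth noting: sp804_read() and clocksource_mmio_readl_down both invert the hardware value, because the SP804 counts down while sched_clock() and the clocksource core expect a monotonically increasing count. For a free-running 32-bit down-counter, the bitwise NOT is exactly such a count. A tiny standalone illustration in plain C:

#include <stdint.h>
#include <stdio.h>

/* A free-running 32-bit timer counts DOWN from 0xffffffff and wraps.
 * Inverting the raw value yields an UP counter with the same period,
 * which is what the sched_clock()/clocksource code wants to see. */
static uint32_t up_ticks(uint32_t down_counter_value)
{
	return ~down_counter_value;
}

int main(void)
{
	printf("%u\n", up_ticks(0xffffffffu));		/* 0: just loaded */
	printf("%u\n", up_ticks(0xffffffffu - 1000));	/* 1000 ticks later */
	return 0;
}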