author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-08-05 17:04:01 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-08-05 17:04:01 -0300
commit    | 57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree      | 5e910f0e82173f4ef4f51111366a3f1299037a7b /drivers/sh
Initial import
Diffstat (limited to 'drivers/sh')
-rw-r--r-- | drivers/sh/Kconfig                       |   5
-rw-r--r-- | drivers/sh/Makefile                      |  11
-rw-r--r-- | drivers/sh/clk/Makefile                  |   3
-rw-r--r-- | drivers/sh/clk/core.c                    | 699
-rw-r--r-- | drivers/sh/clk/cpg.c                     | 492
-rw-r--r-- | drivers/sh/intc/Kconfig                  |  43
-rw-r--r-- | drivers/sh/intc/Makefile                 |   5
-rw-r--r-- | drivers/sh/intc/access.c                 | 246
-rw-r--r-- | drivers/sh/intc/balancing.c              |  97
-rw-r--r-- | drivers/sh/intc/chip.c                   | 211
-rw-r--r-- | drivers/sh/intc/core.c                   | 511
-rw-r--r-- | drivers/sh/intc/handle.c                 | 306
-rw-r--r-- | drivers/sh/intc/internals.h              | 198
-rw-r--r-- | drivers/sh/intc/irqdomain.c              |  68
-rw-r--r-- | drivers/sh/intc/userimask.c              |  84
-rw-r--r-- | drivers/sh/intc/virq-debugfs.c           |  64
-rw-r--r-- | drivers/sh/intc/virq.c                   | 265
-rw-r--r-- | drivers/sh/maple/Makefile                |   3
-rw-r--r-- | drivers/sh/maple/maple.c                 | 893
-rw-r--r-- | drivers/sh/pm_runtime.c                  | 101
-rw-r--r-- | drivers/sh/superhyway/Makefile           |   7
-rw-r--r-- | drivers/sh/superhyway/superhyway-sysfs.c |  45
-rw-r--r-- | drivers/sh/superhyway/superhyway.c       | 238
23 files changed, 4595 insertions, 0 deletions
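For orientation before the raw diff: a minimal, hypothetical sketch of how a board file consumes the clock framework that drivers/sh/clk/core.c (below) introduces. The struct clk fields, struct sh_clk_ops, clk_register(), followparent_recalc(), CLK_ENABLE_ON_INIT and recalculate_root_clocks() all appear in the imported code; the clock names, the example rate, and board_clock_init() itself are invented purely for illustration and are not part of this commit.

```c
/*
 * Illustrative sketch only -- not part of the commit. A fixed-rate root
 * clock plus one child whose rate simply follows its parent, registered
 * with the framework imported in drivers/sh/clk/core.c.
 */
#include <linux/kernel.h>
#include <linux/sh_clk.h>

static struct clk extal_clk = {
	.rate	= 33333333,			/* board crystal, example value */
};

static struct sh_clk_ops follow_parent_ops = {
	.recalc	= followparent_recalc,		/* rate tracks the parent clock */
};

static struct clk peripheral_clk = {
	.parent	= &extal_clk,
	.ops	= &follow_parent_ops,
	.flags	= CLK_ENABLE_ON_INIT,		/* picked up by clk_enable_init_clocks() */
};

static struct clk *board_clks[] = {
	&extal_clk,				/* register the parent first */
	&peripheral_clk,
};

int __init board_clock_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(board_clks) && !ret; i++)
		ret = clk_register(board_clks[i]);	/* roots end up on root_clks */

	if (!ret)
		recalculate_root_clocks();	/* propagate rates to the children */

	return ret;
}
```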
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig new file mode 100644 index 000000000..f168a6159 --- /dev/null +++ b/drivers/sh/Kconfig @@ -0,0 +1,5 @@ +menu "SuperH / SH-Mobile Driver Options" + +source "drivers/sh/intc/Kconfig" + +endmenu diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile new file mode 100644 index 000000000..114203f32 --- /dev/null +++ b/drivers/sh/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the SuperH specific drivers. +# +obj-$(CONFIG_SH_INTC) += intc/ +ifneq ($(CONFIG_COMMON_CLK),y) +obj-$(CONFIG_HAVE_CLK) += clk/ +endif +obj-$(CONFIG_MAPLE) += maple/ +obj-$(CONFIG_SUPERHYWAY) += superhyway/ + +obj-y += pm_runtime.o diff --git a/drivers/sh/clk/Makefile b/drivers/sh/clk/Makefile new file mode 100644 index 000000000..5d15ebfaa --- /dev/null +++ b/drivers/sh/clk/Makefile @@ -0,0 +1,3 @@ +obj-y := core.o + +obj-$(CONFIG_SH_CLK_CPG) += cpg.o diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c new file mode 100644 index 000000000..be56b22ca --- /dev/null +++ b/drivers/sh/clk/core.c @@ -0,0 +1,699 @@ +/* + * SuperH clock framework + * + * Copyright (C) 2005 - 2010 Paul Mundt + * + * This clock framework is derived from the OMAP version by: + * + * Copyright (C) 2004 - 2008 Nokia Corporation + * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> + * + * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "clock: " fmt + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/syscore_ops.h> +#include <linux/seq_file.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/cpufreq.h> +#include <linux/clk.h> +#include <linux/sh_clk.h> + +static LIST_HEAD(clock_list); +static DEFINE_SPINLOCK(clock_lock); +static DEFINE_MUTEX(clock_list_sem); + +/* clock disable operations are not passed on to hardware during boot */ +static int allow_disable; + +void clk_rate_table_build(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + int nr_freqs, + struct clk_div_mult_table *src_table, + unsigned long *bitmap) +{ + unsigned long mult, div; + unsigned long freq; + int i; + + clk->nr_freqs = nr_freqs; + + for (i = 0; i < nr_freqs; i++) { + div = 1; + mult = 1; + + if (src_table->divisors && i < src_table->nr_divisors) + div = src_table->divisors[i]; + + if (src_table->multipliers && i < src_table->nr_multipliers) + mult = src_table->multipliers[i]; + + if (!div || !mult || (bitmap && !test_bit(i, bitmap))) + freq = CPUFREQ_ENTRY_INVALID; + else + freq = clk->parent->rate * mult / div; + + freq_table[i].driver_data = i; + freq_table[i].frequency = freq; + } + + /* Termination entry */ + freq_table[i].driver_data = i; + freq_table[i].frequency = CPUFREQ_TABLE_END; +} + +struct clk_rate_round_data; + +struct clk_rate_round_data { + unsigned long rate; + unsigned int min, max; + long (*func)(unsigned int, struct clk_rate_round_data *); + void *arg; +}; + +#define for_each_frequency(pos, r, freq) \ + for (pos = r->min, freq = r->func(pos, r); \ + pos <= r->max; pos++, freq = r->func(pos, r)) \ + if (unlikely(freq == 0)) \ + ; \ + else + +static long clk_rate_round_helper(struct clk_rate_round_data *rounder) +{ + unsigned long rate_error, rate_error_prev = ~0UL; + unsigned long highest, lowest, freq; + long rate_best_fit = -ENOENT; + 
int i; + + highest = 0; + lowest = ~0UL; + + for_each_frequency(i, rounder, freq) { + if (freq > highest) + highest = freq; + if (freq < lowest) + lowest = freq; + + rate_error = abs(freq - rounder->rate); + if (rate_error < rate_error_prev) { + rate_best_fit = freq; + rate_error_prev = rate_error; + } + + if (rate_error == 0) + break; + } + + if (rounder->rate >= highest) + rate_best_fit = highest; + if (rounder->rate <= lowest) + rate_best_fit = lowest; + + return rate_best_fit; +} + +static long clk_rate_table_iter(unsigned int pos, + struct clk_rate_round_data *rounder) +{ + struct cpufreq_frequency_table *freq_table = rounder->arg; + unsigned long freq = freq_table[pos].frequency; + + if (freq == CPUFREQ_ENTRY_INVALID) + freq = 0; + + return freq; +} + +long clk_rate_table_round(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + unsigned long rate) +{ + struct clk_rate_round_data table_round = { + .min = 0, + .max = clk->nr_freqs - 1, + .func = clk_rate_table_iter, + .arg = freq_table, + .rate = rate, + }; + + if (clk->nr_freqs < 1) + return -ENOSYS; + + return clk_rate_round_helper(&table_round); +} + +static long clk_rate_div_range_iter(unsigned int pos, + struct clk_rate_round_data *rounder) +{ + return clk_get_rate(rounder->arg) / pos; +} + +long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, + unsigned int div_max, unsigned long rate) +{ + struct clk_rate_round_data div_range_round = { + .min = div_min, + .max = div_max, + .func = clk_rate_div_range_iter, + .arg = clk_get_parent(clk), + .rate = rate, + }; + + return clk_rate_round_helper(&div_range_round); +} + +static long clk_rate_mult_range_iter(unsigned int pos, + struct clk_rate_round_data *rounder) +{ + return clk_get_rate(rounder->arg) * pos; +} + +long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min, + unsigned int mult_max, unsigned long rate) +{ + struct clk_rate_round_data mult_range_round = { + .min = mult_min, + .max = mult_max, + .func = clk_rate_mult_range_iter, + .arg = clk_get_parent(clk), + .rate = rate, + }; + + return clk_rate_round_helper(&mult_range_round); +} + +int clk_rate_table_find(struct clk *clk, + struct cpufreq_frequency_table *freq_table, + unsigned long rate) +{ + struct cpufreq_frequency_table *pos; + + cpufreq_for_each_valid_entry(pos, freq_table) + if (pos->frequency == rate) + return pos - freq_table; + + return -ENOENT; +} + +/* Used for clocks that always have same value as the parent clock */ +unsigned long followparent_recalc(struct clk *clk) +{ + return clk->parent ? 
clk->parent->rate : 0; +} + +int clk_reparent(struct clk *child, struct clk *parent) +{ + list_del_init(&child->sibling); + if (parent) + list_add(&child->sibling, &parent->children); + child->parent = parent; + + return 0; +} + +/* Propagate rate to children */ +void propagate_rate(struct clk *tclk) +{ + struct clk *clkp; + + list_for_each_entry(clkp, &tclk->children, sibling) { + if (clkp->ops && clkp->ops->recalc) + clkp->rate = clkp->ops->recalc(clkp); + + propagate_rate(clkp); + } +} + +static void __clk_disable(struct clk *clk) +{ + if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n", + clk)) + return; + + if (!(--clk->usecount)) { + if (likely(allow_disable && clk->ops && clk->ops->disable)) + clk->ops->disable(clk); + if (likely(clk->parent)) + __clk_disable(clk->parent); + } +} + +void clk_disable(struct clk *clk) +{ + unsigned long flags; + + if (!clk) + return; + + spin_lock_irqsave(&clock_lock, flags); + __clk_disable(clk); + spin_unlock_irqrestore(&clock_lock, flags); +} +EXPORT_SYMBOL_GPL(clk_disable); + +static int __clk_enable(struct clk *clk) +{ + int ret = 0; + + if (clk->usecount++ == 0) { + if (clk->parent) { + ret = __clk_enable(clk->parent); + if (unlikely(ret)) + goto err; + } + + if (clk->ops && clk->ops->enable) { + ret = clk->ops->enable(clk); + if (ret) { + if (clk->parent) + __clk_disable(clk->parent); + goto err; + } + } + } + + return ret; +err: + clk->usecount--; + return ret; +} + +int clk_enable(struct clk *clk) +{ + unsigned long flags; + int ret; + + if (!clk) + return -EINVAL; + + spin_lock_irqsave(&clock_lock, flags); + ret = __clk_enable(clk); + spin_unlock_irqrestore(&clock_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_enable); + +static LIST_HEAD(root_clks); + +/** + * recalculate_root_clocks - recalculate and propagate all root clocks + * + * Recalculates all root clocks (clocks with no parent), which if the + * clock's .recalc is set correctly, should also propagate their rates. + * Called at init. + */ +void recalculate_root_clocks(void) +{ + struct clk *clkp; + + list_for_each_entry(clkp, &root_clks, sibling) { + if (clkp->ops && clkp->ops->recalc) + clkp->rate = clkp->ops->recalc(clkp); + propagate_rate(clkp); + } +} + +static struct clk_mapping dummy_mapping; + +static struct clk *lookup_root_clock(struct clk *clk) +{ + while (clk->parent) + clk = clk->parent; + + return clk; +} + +static int clk_establish_mapping(struct clk *clk) +{ + struct clk_mapping *mapping = clk->mapping; + + /* + * Propagate mappings. + */ + if (!mapping) { + struct clk *clkp; + + /* + * dummy mapping for root clocks with no specified ranges + */ + if (!clk->parent) { + clk->mapping = &dummy_mapping; + goto out; + } + + /* + * If we're on a child clock and it provides no mapping of its + * own, inherit the mapping from its root clock. + */ + clkp = lookup_root_clock(clk); + mapping = clkp->mapping; + BUG_ON(!mapping); + } + + /* + * Establish initial mapping. 
+ */ + if (!mapping->base && mapping->phys) { + kref_init(&mapping->ref); + + mapping->base = ioremap_nocache(mapping->phys, mapping->len); + if (unlikely(!mapping->base)) + return -ENXIO; + } else if (mapping->base) { + /* + * Bump the refcount for an existing mapping + */ + kref_get(&mapping->ref); + } + + clk->mapping = mapping; +out: + clk->mapped_reg = clk->mapping->base; + clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys; + return 0; +} + +static void clk_destroy_mapping(struct kref *kref) +{ + struct clk_mapping *mapping; + + mapping = container_of(kref, struct clk_mapping, ref); + + iounmap(mapping->base); +} + +static void clk_teardown_mapping(struct clk *clk) +{ + struct clk_mapping *mapping = clk->mapping; + + /* Nothing to do */ + if (mapping == &dummy_mapping) + goto out; + + kref_put(&mapping->ref, clk_destroy_mapping); + clk->mapping = NULL; +out: + clk->mapped_reg = NULL; +} + +int clk_register(struct clk *clk) +{ + int ret; + + if (IS_ERR_OR_NULL(clk)) + return -EINVAL; + + /* + * trap out already registered clocks + */ + if (clk->node.next || clk->node.prev) + return 0; + + mutex_lock(&clock_list_sem); + + INIT_LIST_HEAD(&clk->children); + clk->usecount = 0; + + ret = clk_establish_mapping(clk); + if (unlikely(ret)) + goto out_unlock; + + if (clk->parent) + list_add(&clk->sibling, &clk->parent->children); + else + list_add(&clk->sibling, &root_clks); + + list_add(&clk->node, &clock_list); + +#ifdef CONFIG_SH_CLK_CPG_LEGACY + if (clk->ops && clk->ops->init) + clk->ops->init(clk); +#endif + +out_unlock: + mutex_unlock(&clock_list_sem); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_register); + +void clk_unregister(struct clk *clk) +{ + mutex_lock(&clock_list_sem); + list_del(&clk->sibling); + list_del(&clk->node); + clk_teardown_mapping(clk); + mutex_unlock(&clock_list_sem); +} +EXPORT_SYMBOL_GPL(clk_unregister); + +void clk_enable_init_clocks(void) +{ + struct clk *clkp; + + list_for_each_entry(clkp, &clock_list, node) + if (clkp->flags & CLK_ENABLE_ON_INIT) + clk_enable(clkp); +} + +unsigned long clk_get_rate(struct clk *clk) +{ + return clk->rate; +} +EXPORT_SYMBOL_GPL(clk_get_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + int ret = -EOPNOTSUPP; + unsigned long flags; + + spin_lock_irqsave(&clock_lock, flags); + + if (likely(clk->ops && clk->ops->set_rate)) { + ret = clk->ops->set_rate(clk, rate); + if (ret != 0) + goto out_unlock; + } else { + clk->rate = rate; + ret = 0; + } + + if (clk->ops && clk->ops->recalc) + clk->rate = clk->ops->recalc(clk); + + propagate_rate(clk); + +out_unlock: + spin_unlock_irqrestore(&clock_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_rate); + +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + unsigned long flags; + int ret = -EINVAL; + + if (!parent || !clk) + return ret; + if (clk->parent == parent) + return 0; + + spin_lock_irqsave(&clock_lock, flags); + if (clk->usecount == 0) { + if (clk->ops->set_parent) + ret = clk->ops->set_parent(clk, parent); + else + ret = clk_reparent(clk, parent); + + if (ret == 0) { + if (clk->ops->recalc) + clk->rate = clk->ops->recalc(clk); + pr_debug("set parent of %p to %p (new rate %ld)\n", + clk, clk->parent, clk->rate); + propagate_rate(clk); + } + } else + ret = -EBUSY; + spin_unlock_irqrestore(&clock_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_parent); + +struct clk *clk_get_parent(struct clk *clk) +{ + return clk->parent; +} +EXPORT_SYMBOL_GPL(clk_get_parent); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ 
+ if (likely(clk->ops && clk->ops->round_rate)) { + unsigned long flags, rounded; + + spin_lock_irqsave(&clock_lock, flags); + rounded = clk->ops->round_rate(clk, rate); + spin_unlock_irqrestore(&clock_lock, flags); + + return rounded; + } + + return clk_get_rate(clk); +} +EXPORT_SYMBOL_GPL(clk_round_rate); + +long clk_round_parent(struct clk *clk, unsigned long target, + unsigned long *best_freq, unsigned long *parent_freq, + unsigned int div_min, unsigned int div_max) +{ + struct cpufreq_frequency_table *freq, *best = NULL; + unsigned long error = ULONG_MAX, freq_high, freq_low, div; + struct clk *parent = clk_get_parent(clk); + + if (!parent) { + *parent_freq = 0; + *best_freq = clk_round_rate(clk, target); + return abs(target - *best_freq); + } + + cpufreq_for_each_valid_entry(freq, parent->freq_table) { + if (unlikely(freq->frequency / target <= div_min - 1)) { + unsigned long freq_max; + + freq_max = (freq->frequency + div_min / 2) / div_min; + if (error > target - freq_max) { + error = target - freq_max; + best = freq; + if (best_freq) + *best_freq = freq_max; + } + + pr_debug("too low freq %u, error %lu\n", freq->frequency, + target - freq_max); + + if (!error) + break; + + continue; + } + + if (unlikely(freq->frequency / target >= div_max)) { + unsigned long freq_min; + + freq_min = (freq->frequency + div_max / 2) / div_max; + if (error > freq_min - target) { + error = freq_min - target; + best = freq; + if (best_freq) + *best_freq = freq_min; + } + + pr_debug("too high freq %u, error %lu\n", freq->frequency, + freq_min - target); + + if (!error) + break; + + continue; + } + + div = freq->frequency / target; + freq_high = freq->frequency / div; + freq_low = freq->frequency / (div + 1); + + if (freq_high - target < error) { + error = freq_high - target; + best = freq; + if (best_freq) + *best_freq = freq_high; + } + + if (target - freq_low < error) { + error = target - freq_low; + best = freq; + if (best_freq) + *best_freq = freq_low; + } + + pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n", + freq->frequency, div, freq_high, div + 1, freq_low, + *best_freq, best->frequency); + + if (!error) + break; + } + + if (parent_freq) + *parent_freq = best->frequency; + + return error; +} +EXPORT_SYMBOL_GPL(clk_round_parent); + +#ifdef CONFIG_PM +static void clks_core_resume(void) +{ + struct clk *clkp; + + list_for_each_entry(clkp, &clock_list, node) { + if (likely(clkp->usecount && clkp->ops)) { + unsigned long rate = clkp->rate; + + if (likely(clkp->ops->set_parent)) + clkp->ops->set_parent(clkp, + clkp->parent); + if (likely(clkp->ops->set_rate)) + clkp->ops->set_rate(clkp, rate); + else if (likely(clkp->ops->recalc)) + clkp->rate = clkp->ops->recalc(clkp); + } + } +} + +static struct syscore_ops clks_syscore_ops = { + .resume = clks_core_resume, +}; + +static int __init clk_syscore_init(void) +{ + register_syscore_ops(&clks_syscore_ops); + + return 0; +} +subsys_initcall(clk_syscore_init); +#endif + +static int __init clk_late_init(void) +{ + unsigned long flags; + struct clk *clk; + + /* disable all clocks with zero use count */ + mutex_lock(&clock_list_sem); + spin_lock_irqsave(&clock_lock, flags); + + list_for_each_entry(clk, &clock_list, node) + if (!clk->usecount && clk->ops && clk->ops->disable) + clk->ops->disable(clk); + + /* from now on allow clock disable operations */ + allow_disable = 1; + + spin_unlock_irqrestore(&clock_lock, flags); + mutex_unlock(&clock_list_sem); + return 0; +} +late_initcall(clk_late_init); diff --git a/drivers/sh/clk/cpg.c 
b/drivers/sh/clk/cpg.c new file mode 100644 index 000000000..7442bc130 --- /dev/null +++ b/drivers/sh/clk/cpg.c @@ -0,0 +1,492 @@ +/* + * Helper routines for SuperH Clock Pulse Generator blocks (CPG). + * + * Copyright (C) 2010 Magnus Damm + * Copyright (C) 2010 - 2012 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/clk.h> +#include <linux/compiler.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/sh_clk.h> + +#define CPG_CKSTP_BIT BIT(8) + +static unsigned int sh_clk_read(struct clk *clk) +{ + if (clk->flags & CLK_ENABLE_REG_8BIT) + return ioread8(clk->mapped_reg); + else if (clk->flags & CLK_ENABLE_REG_16BIT) + return ioread16(clk->mapped_reg); + + return ioread32(clk->mapped_reg); +} + +static void sh_clk_write(int value, struct clk *clk) +{ + if (clk->flags & CLK_ENABLE_REG_8BIT) + iowrite8(value, clk->mapped_reg); + else if (clk->flags & CLK_ENABLE_REG_16BIT) + iowrite16(value, clk->mapped_reg); + else + iowrite32(value, clk->mapped_reg); +} + +static unsigned int r8(const void __iomem *addr) +{ + return ioread8(addr); +} + +static unsigned int r16(const void __iomem *addr) +{ + return ioread16(addr); +} + +static unsigned int r32(const void __iomem *addr) +{ + return ioread32(addr); +} + +static int sh_clk_mstp_enable(struct clk *clk) +{ + sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); + if (clk->status_reg) { + unsigned int (*read)(const void __iomem *addr); + int i; + void __iomem *mapped_status = (phys_addr_t)clk->status_reg - + (phys_addr_t)clk->enable_reg + clk->mapped_reg; + + if (clk->flags & CLK_ENABLE_REG_8BIT) + read = r8; + else if (clk->flags & CLK_ENABLE_REG_16BIT) + read = r16; + else + read = r32; + + for (i = 1000; + (read(mapped_status) & (1 << clk->enable_bit)) && i; + i--) + cpu_relax(); + if (!i) { + pr_err("cpg: failed to enable %p[%d]\n", + clk->enable_reg, clk->enable_bit); + return -ETIMEDOUT; + } + } + return 0; +} + +static void sh_clk_mstp_disable(struct clk *clk) +{ + sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk); +} + +static struct sh_clk_ops sh_clk_mstp_clk_ops = { + .enable = sh_clk_mstp_enable, + .disable = sh_clk_mstp_disable, + .recalc = followparent_recalc, +}; + +int __init sh_clk_mstp_register(struct clk *clks, int nr) +{ + struct clk *clkp; + int ret = 0; + int k; + + for (k = 0; !ret && (k < nr); k++) { + clkp = clks + k; + clkp->ops = &sh_clk_mstp_clk_ops; + ret |= clk_register(clkp); + } + + return ret; +} + +/* + * Div/mult table lookup helpers + */ +static inline struct clk_div_table *clk_to_div_table(struct clk *clk) +{ + return clk->priv; +} + +static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk) +{ + return clk_to_div_table(clk)->div_mult_table; +} + +/* + * Common div ops + */ +static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate) +{ + return clk_rate_table_round(clk, clk->freq_table, rate); +} + +static unsigned long sh_clk_div_recalc(struct clk *clk) +{ + struct clk_div_mult_table *table = clk_to_div_mult_table(clk); + unsigned int idx; + + clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, + table, clk->arch_flags ? 
&clk->arch_flags : NULL); + + idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask; + + return clk->freq_table[idx].frequency; +} + +static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate) +{ + struct clk_div_table *dt = clk_to_div_table(clk); + unsigned long value; + int idx; + + idx = clk_rate_table_find(clk, clk->freq_table, rate); + if (idx < 0) + return idx; + + value = sh_clk_read(clk); + value &= ~(clk->div_mask << clk->enable_bit); + value |= (idx << clk->enable_bit); + sh_clk_write(value, clk); + + /* XXX: Should use a post-change notifier */ + if (dt->kick) + dt->kick(clk); + + return 0; +} + +static int sh_clk_div_enable(struct clk *clk) +{ + if (clk->div_mask == SH_CLK_DIV6_MSK) { + int ret = sh_clk_div_set_rate(clk, clk->rate); + if (ret < 0) + return ret; + } + + sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk); + return 0; +} + +static void sh_clk_div_disable(struct clk *clk) +{ + unsigned int val; + + val = sh_clk_read(clk); + val |= CPG_CKSTP_BIT; + + /* + * div6 clocks require the divisor field to be non-zero or the + * above CKSTP toggle silently fails. Ensure that the divisor + * array is reset to its initial state on disable. + */ + if (clk->flags & CLK_MASK_DIV_ON_DISABLE) + val |= clk->div_mask; + + sh_clk_write(val, clk); +} + +static struct sh_clk_ops sh_clk_div_clk_ops = { + .recalc = sh_clk_div_recalc, + .set_rate = sh_clk_div_set_rate, + .round_rate = sh_clk_div_round_rate, +}; + +static struct sh_clk_ops sh_clk_div_enable_clk_ops = { + .recalc = sh_clk_div_recalc, + .set_rate = sh_clk_div_set_rate, + .round_rate = sh_clk_div_round_rate, + .enable = sh_clk_div_enable, + .disable = sh_clk_div_disable, +}; + +static int __init sh_clk_init_parent(struct clk *clk) +{ + u32 val; + + if (clk->parent) + return 0; + + if (!clk->parent_table || !clk->parent_num) + return 0; + + if (!clk->src_width) { + pr_err("sh_clk_init_parent: cannot select parent clock\n"); + return -EINVAL; + } + + val = (sh_clk_read(clk) >> clk->src_shift); + val &= (1 << clk->src_width) - 1; + + if (val >= clk->parent_num) { + pr_err("sh_clk_init_parent: parent table size failed\n"); + return -EINVAL; + } + + clk_reparent(clk, clk->parent_table[val]); + if (!clk->parent) { + pr_err("sh_clk_init_parent: unable to set parent"); + return -EINVAL; + } + + return 0; +} + +static int __init sh_clk_div_register_ops(struct clk *clks, int nr, + struct clk_div_table *table, struct sh_clk_ops *ops) +{ + struct clk *clkp; + void *freq_table; + int nr_divs = table->div_mult_table->nr_divisors; + int freq_table_size = sizeof(struct cpufreq_frequency_table); + int ret = 0; + int k; + + freq_table_size *= (nr_divs + 1); + freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL); + if (!freq_table) { + pr_err("%s: unable to alloc memory\n", __func__); + return -ENOMEM; + } + + for (k = 0; !ret && (k < nr); k++) { + clkp = clks + k; + + clkp->ops = ops; + clkp->priv = table; + + clkp->freq_table = freq_table + (k * freq_table_size); + clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END; + + ret = clk_register(clkp); + if (ret == 0) + ret = sh_clk_init_parent(clkp); + } + + return ret; +} + +/* + * div6 support + */ +static int sh_clk_div6_divisors[64] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 +}; + +static struct clk_div_mult_table div6_div_mult_table = { + .divisors = 
sh_clk_div6_divisors, + .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors), +}; + +static struct clk_div_table sh_clk_div6_table = { + .div_mult_table = &div6_div_mult_table, +}; + +static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) +{ + struct clk_div_mult_table *table = clk_to_div_mult_table(clk); + u32 value; + int ret, i; + + if (!clk->parent_table || !clk->parent_num) + return -EINVAL; + + /* Search the parent */ + for (i = 0; i < clk->parent_num; i++) + if (clk->parent_table[i] == parent) + break; + + if (i == clk->parent_num) + return -ENODEV; + + ret = clk_reparent(clk, parent); + if (ret < 0) + return ret; + + value = sh_clk_read(clk) & + ~(((1 << clk->src_width) - 1) << clk->src_shift); + + sh_clk_write(value | (i << clk->src_shift), clk); + + /* Rebuild the frequency table */ + clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, + table, NULL); + + return 0; +} + +static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = { + .recalc = sh_clk_div_recalc, + .round_rate = sh_clk_div_round_rate, + .set_rate = sh_clk_div_set_rate, + .enable = sh_clk_div_enable, + .disable = sh_clk_div_disable, + .set_parent = sh_clk_div6_set_parent, +}; + +int __init sh_clk_div6_register(struct clk *clks, int nr) +{ + return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table, + &sh_clk_div_enable_clk_ops); +} + +int __init sh_clk_div6_reparent_register(struct clk *clks, int nr) +{ + return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table, + &sh_clk_div6_reparent_clk_ops); +} + +/* + * div4 support + */ +static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent) +{ + struct clk_div_mult_table *table = clk_to_div_mult_table(clk); + u32 value; + int ret; + + /* we really need a better way to determine parent index, but for + * now assume internal parent comes with CLK_ENABLE_ON_INIT set, + * no CLK_ENABLE_ON_INIT means external clock... 
+ */ + + if (parent->flags & CLK_ENABLE_ON_INIT) + value = sh_clk_read(clk) & ~(1 << 7); + else + value = sh_clk_read(clk) | (1 << 7); + + ret = clk_reparent(clk, parent); + if (ret < 0) + return ret; + + sh_clk_write(value, clk); + + /* Rebiuld the frequency table */ + clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, + table, &clk->arch_flags); + + return 0; +} + +static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = { + .recalc = sh_clk_div_recalc, + .set_rate = sh_clk_div_set_rate, + .round_rate = sh_clk_div_round_rate, + .enable = sh_clk_div_enable, + .disable = sh_clk_div_disable, + .set_parent = sh_clk_div4_set_parent, +}; + +int __init sh_clk_div4_register(struct clk *clks, int nr, + struct clk_div4_table *table) +{ + return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops); +} + +int __init sh_clk_div4_enable_register(struct clk *clks, int nr, + struct clk_div4_table *table) +{ + return sh_clk_div_register_ops(clks, nr, table, + &sh_clk_div_enable_clk_ops); +} + +int __init sh_clk_div4_reparent_register(struct clk *clks, int nr, + struct clk_div4_table *table) +{ + return sh_clk_div_register_ops(clks, nr, table, + &sh_clk_div4_reparent_clk_ops); +} + +/* FSI-DIV */ +static unsigned long fsidiv_recalc(struct clk *clk) +{ + u32 value; + + value = __raw_readl(clk->mapping->base); + + value >>= 16; + if (value < 2) + return clk->parent->rate; + + return clk->parent->rate / value; +} + +static long fsidiv_round_rate(struct clk *clk, unsigned long rate) +{ + return clk_rate_div_range_round(clk, 1, 0xffff, rate); +} + +static void fsidiv_disable(struct clk *clk) +{ + __raw_writel(0, clk->mapping->base); +} + +static int fsidiv_enable(struct clk *clk) +{ + u32 value; + + value = __raw_readl(clk->mapping->base) >> 16; + if (value < 2) + return 0; + + __raw_writel((value << 16) | 0x3, clk->mapping->base); + + return 0; +} + +static int fsidiv_set_rate(struct clk *clk, unsigned long rate) +{ + int idx; + + idx = (clk->parent->rate / rate) & 0xffff; + if (idx < 2) + __raw_writel(0, clk->mapping->base); + else + __raw_writel(idx << 16, clk->mapping->base); + + return 0; +} + +static struct sh_clk_ops fsidiv_clk_ops = { + .recalc = fsidiv_recalc, + .round_rate = fsidiv_round_rate, + .set_rate = fsidiv_set_rate, + .enable = fsidiv_enable, + .disable = fsidiv_disable, +}; + +int __init sh_clk_fsidiv_register(struct clk *clks, int nr) +{ + struct clk_mapping *map; + int i; + + for (i = 0; i < nr; i++) { + + map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL); + if (!map) { + pr_err("%s: unable to alloc memory\n", __func__); + return -ENOMEM; + } + + /* clks[i].enable_reg came from SH_CLK_FSIDIV() */ + map->phys = (phys_addr_t)clks[i].enable_reg; + map->len = 8; + + clks[i].enable_reg = 0; /* remove .enable_reg */ + clks[i].ops = &fsidiv_clk_ops; + clks[i].mapping = map; + + clk_register(&clks[i]); + } + + return 0; +} diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig new file mode 100644 index 000000000..6a1b05ddc --- /dev/null +++ b/drivers/sh/intc/Kconfig @@ -0,0 +1,43 @@ +config SH_INTC + bool + select IRQ_DOMAIN + +if SH_INTC + +comment "Interrupt controller options" + +config INTC_USERIMASK + bool "Userspace interrupt masking support" + depends on (SUPERH && CPU_SH4A) || COMPILE_TEST + help + This enables support for hardware-assisted userspace hardirq + masking. + + SH-4A and newer interrupt blocks all support a special shadowed + page with all non-masking registers obscured when mapped in to + userspace. 
This is primarily for use by userspace device + drivers that are using special priority levels. + + If in doubt, say N. + +config INTC_BALANCING + bool "Hardware IRQ balancing support" + depends on SMP && SUPERH && CPU_SHX3 + help + This enables support for IRQ auto-distribution mode on SH-X3 + SMP parts. All of the balancing and CPU wakeup decisions are + taken care of automatically by hardware for distributed + vectors. + + If in doubt, say N. + +config INTC_MAPPING_DEBUG + bool "Expose IRQ to per-controller id mapping via debugfs" + depends on DEBUG_FS + help + This will create a debugfs entry for showing the relationship + between system IRQs and the per-controller id tables. + + If in doubt, say N. + +endif diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile new file mode 100644 index 000000000..54ec2a064 --- /dev/null +++ b/drivers/sh/intc/Makefile @@ -0,0 +1,5 @@ +obj-y := access.o chip.o core.o handle.o irqdomain.o virq.o + +obj-$(CONFIG_INTC_BALANCING) += balancing.o +obj-$(CONFIG_INTC_USERIMASK) += userimask.o +obj-$(CONFIG_INTC_MAPPING_DEBUG) += virq-debugfs.o diff --git a/drivers/sh/intc/access.c b/drivers/sh/intc/access.c new file mode 100644 index 000000000..114390f96 --- /dev/null +++ b/drivers/sh/intc/access.c @@ -0,0 +1,246 @@ +/* + * Common INTC2 register accessors + * + * Copyright (C) 2007, 2008 Magnus Damm + * Copyright (C) 2009, 2010 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/io.h> +#include "internals.h" + +unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address) +{ + struct intc_window *window; + int k; + + /* scan through physical windows and convert address */ + for (k = 0; k < d->nr_windows; k++) { + window = d->window + k; + + if (address < window->phys) + continue; + + if (address >= (window->phys + window->size)) + continue; + + address -= window->phys; + address += (unsigned long)window->virt; + + return address; + } + + /* no windows defined, register must be 1:1 mapped virt:phys */ + return address; +} + +unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address) +{ + unsigned int k; + + address = intc_phys_to_virt(d, address); + + for (k = 0; k < d->nr_reg; k++) { + if (d->reg[k] == address) + return k; + } + + BUG(); + return 0; +} + +unsigned int intc_set_field_from_handle(unsigned int value, + unsigned int field_value, + unsigned int handle) +{ + unsigned int width = _INTC_WIDTH(handle); + unsigned int shift = _INTC_SHIFT(handle); + + value &= ~(((1 << width) - 1) << shift); + value |= field_value << shift; + return value; +} + +unsigned long intc_get_field_from_handle(unsigned int value, unsigned int handle) +{ + unsigned int width = _INTC_WIDTH(handle); + unsigned int shift = _INTC_SHIFT(handle); + unsigned int mask = ((1 << width) - 1) << shift; + + return (value & mask) >> shift; +} + +static unsigned long test_8(unsigned long addr, unsigned long h, + unsigned long ignore) +{ + void __iomem *ptr = (void __iomem *)addr; + return intc_get_field_from_handle(__raw_readb(ptr), h); +} + +static unsigned long test_16(unsigned long addr, unsigned long h, + unsigned long ignore) +{ + void __iomem *ptr = (void __iomem *)addr; + return intc_get_field_from_handle(__raw_readw(ptr), h); +} + +static unsigned long test_32(unsigned long addr, unsigned long h, + unsigned long ignore) +{ + void __iomem *ptr = (void __iomem *)addr; + return 
intc_get_field_from_handle(__raw_readl(ptr), h); +} + +static unsigned long write_8(unsigned long addr, unsigned long h, + unsigned long data) +{ + void __iomem *ptr = (void __iomem *)addr; + __raw_writeb(intc_set_field_from_handle(0, data, h), ptr); + (void)__raw_readb(ptr); /* Defeat write posting */ + return 0; +} + +static unsigned long write_16(unsigned long addr, unsigned long h, + unsigned long data) +{ + void __iomem *ptr = (void __iomem *)addr; + __raw_writew(intc_set_field_from_handle(0, data, h), ptr); + (void)__raw_readw(ptr); /* Defeat write posting */ + return 0; +} + +static unsigned long write_32(unsigned long addr, unsigned long h, + unsigned long data) +{ + void __iomem *ptr = (void __iomem *)addr; + __raw_writel(intc_set_field_from_handle(0, data, h), ptr); + (void)__raw_readl(ptr); /* Defeat write posting */ + return 0; +} + +static unsigned long modify_8(unsigned long addr, unsigned long h, + unsigned long data) +{ + void __iomem *ptr = (void __iomem *)addr; + unsigned long flags; + unsigned int value; + local_irq_save(flags); + value = intc_set_field_from_handle(__raw_readb(ptr), data, h); + __raw_writeb(value, ptr); + (void)__raw_readb(ptr); /* Defeat write posting */ + local_irq_restore(flags); + return 0; +} + +static unsigned long modify_16(unsigned long addr, unsigned long h, + unsigned long data) +{ + void __iomem *ptr = (void __iomem *)addr; + unsigned long flags; + unsigned int value; + local_irq_save(flags); + value = intc_set_field_from_handle(__raw_readw(ptr), data, h); + __raw_writew(value, ptr); + (void)__raw_readw(ptr); /* Defeat write posting */ + local_irq_restore(flags); + return 0; +} + +static unsigned long modify_32(unsigned long addr, unsigned long h, + unsigned long data) +{ + void __iomem *ptr = (void __iomem *)addr; + unsigned long flags; + unsigned int value; + local_irq_save(flags); + value = intc_set_field_from_handle(__raw_readl(ptr), data, h); + __raw_writel(value, ptr); + (void)__raw_readl(ptr); /* Defeat write posting */ + local_irq_restore(flags); + return 0; +} + +static unsigned long intc_mode_field(unsigned long addr, + unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) +{ + return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1)); +} + +static unsigned long intc_mode_zero(unsigned long addr, + unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) +{ + return fn(addr, handle, 0); +} + +static unsigned long intc_mode_prio(unsigned long addr, + unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) +{ + return fn(addr, handle, intc_get_prio_level(irq)); +} + +unsigned long (*intc_reg_fns[])(unsigned long addr, + unsigned long h, + unsigned long data) = { + [REG_FN_TEST_BASE + 0] = test_8, + [REG_FN_TEST_BASE + 1] = test_16, + [REG_FN_TEST_BASE + 3] = test_32, + [REG_FN_WRITE_BASE + 0] = write_8, + [REG_FN_WRITE_BASE + 1] = write_16, + [REG_FN_WRITE_BASE + 3] = write_32, + [REG_FN_MODIFY_BASE + 0] = modify_8, + [REG_FN_MODIFY_BASE + 1] = modify_16, + [REG_FN_MODIFY_BASE + 3] = modify_32, +}; + +unsigned long (*intc_enable_fns[])(unsigned long addr, + unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) = { + [MODE_ENABLE_REG] = intc_mode_field, + [MODE_MASK_REG] = intc_mode_zero, + [MODE_DUAL_REG] = intc_mode_field, + [MODE_PRIO_REG] = intc_mode_prio, + [MODE_PCLR_REG] = intc_mode_prio, +}; + 
+unsigned long (*intc_disable_fns[])(unsigned long addr, + unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) = { + [MODE_ENABLE_REG] = intc_mode_zero, + [MODE_MASK_REG] = intc_mode_field, + [MODE_DUAL_REG] = intc_mode_field, + [MODE_PRIO_REG] = intc_mode_zero, + [MODE_PCLR_REG] = intc_mode_field, +}; + +unsigned long (*intc_enable_noprio_fns[])(unsigned long addr, + unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) = { + [MODE_ENABLE_REG] = intc_mode_field, + [MODE_MASK_REG] = intc_mode_zero, + [MODE_DUAL_REG] = intc_mode_field, + [MODE_PRIO_REG] = intc_mode_field, + [MODE_PCLR_REG] = intc_mode_field, +}; diff --git a/drivers/sh/intc/balancing.c b/drivers/sh/intc/balancing.c new file mode 100644 index 000000000..bc780807c --- /dev/null +++ b/drivers/sh/intc/balancing.c @@ -0,0 +1,97 @@ +/* + * Support for hardware-managed IRQ auto-distribution. + * + * Copyright (C) 2010 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include "internals.h" + +static unsigned long dist_handle[INTC_NR_IRQS]; + +void intc_balancing_enable(unsigned int irq) +{ + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long handle = dist_handle[irq]; + unsigned long addr; + + if (irq_balancing_disabled(irq) || !handle) + return; + + addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); + intc_reg_fns[_INTC_FN(handle)](addr, handle, 1); +} + +void intc_balancing_disable(unsigned int irq) +{ + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long handle = dist_handle[irq]; + unsigned long addr; + + if (irq_balancing_disabled(irq) || !handle) + return; + + addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); + intc_reg_fns[_INTC_FN(handle)](addr, handle, 0); +} + +static unsigned int intc_dist_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id) +{ + struct intc_mask_reg *mr = desc->hw.mask_regs; + unsigned int i, j, fn, mode; + unsigned long reg_e, reg_d; + + for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) { + mr = desc->hw.mask_regs + i; + + /* + * Skip this entry if there's no auto-distribution + * register associated with it. + */ + if (!mr->dist_reg) + continue; + + for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { + if (mr->enum_ids[j] != enum_id) + continue; + + fn = REG_FN_MODIFY_BASE; + mode = MODE_ENABLE_REG; + reg_e = mr->dist_reg; + reg_d = mr->dist_reg; + + fn += (mr->reg_width >> 3) - 1; + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + 1, + (mr->reg_width - 1) - j); + } + } + + /* + * It's possible we've gotten here with no distribution options + * available for the IRQ in question, so we just skip over those. + */ + return 0; +} + +void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc, + struct intc_desc_int *d, intc_enum id) +{ + unsigned long flags; + + /* + * Nothing to do for this IRQ. + */ + if (!desc->hw.mask_regs) + return; + + raw_spin_lock_irqsave(&intc_big_lock, flags); + dist_handle[irq] = intc_dist_data(desc, d, id); + raw_spin_unlock_irqrestore(&intc_big_lock, flags); +} diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c new file mode 100644 index 000000000..46427b48e --- /dev/null +++ b/drivers/sh/intc/chip.c @@ -0,0 +1,211 @@ +/* + * IRQ chip definitions for INTC IRQs. 
+ * + * Copyright (C) 2007, 2008 Magnus Damm + * Copyright (C) 2009 - 2012 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/cpumask.h> +#include <linux/bsearch.h> +#include <linux/io.h> +#include "internals.h" + +void _intc_enable(struct irq_data *data, unsigned long handle) +{ + unsigned int irq = data->irq; + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long addr; + unsigned int cpu; + + for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { +#ifdef CONFIG_SMP + if (!cpumask_test_cpu(cpu, data->affinity)) + continue; +#endif + addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); + intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ + [_INTC_FN(handle)], irq); + } + + intc_balancing_enable(irq); +} + +static void intc_enable(struct irq_data *data) +{ + _intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data)); +} + +static void intc_disable(struct irq_data *data) +{ + unsigned int irq = data->irq; + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data); + unsigned long addr; + unsigned int cpu; + + intc_balancing_disable(irq); + + for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { +#ifdef CONFIG_SMP + if (!cpumask_test_cpu(cpu, data->affinity)) + continue; +#endif + addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); + intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ + [_INTC_FN(handle)], irq); + } +} + +#ifdef CONFIG_SMP +/* + * This is held with the irq desc lock held, so we don't require any + * additional locking here at the intc desc level. The affinity mask is + * later tested in the enable/disable paths. 
+ */ +static int intc_set_affinity(struct irq_data *data, + const struct cpumask *cpumask, + bool force) +{ + if (!cpumask_intersects(cpumask, cpu_online_mask)) + return -1; + + cpumask_copy(data->affinity, cpumask); + + return IRQ_SET_MASK_OK_NOCOPY; +} +#endif + +static void intc_mask_ack(struct irq_data *data) +{ + unsigned int irq = data->irq; + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long handle = intc_get_ack_handle(irq); + void __iomem *addr; + + intc_disable(data); + + /* read register and write zero only to the associated bit */ + if (handle) { + unsigned int value; + + addr = (void __iomem *)INTC_REG(d, _INTC_ADDR_D(handle), 0); + value = intc_set_field_from_handle(0, 1, handle); + + switch (_INTC_FN(handle)) { + case REG_FN_MODIFY_BASE + 0: /* 8bit */ + __raw_readb(addr); + __raw_writeb(0xff ^ value, addr); + break; + case REG_FN_MODIFY_BASE + 1: /* 16bit */ + __raw_readw(addr); + __raw_writew(0xffff ^ value, addr); + break; + case REG_FN_MODIFY_BASE + 3: /* 32bit */ + __raw_readl(addr); + __raw_writel(0xffffffff ^ value, addr); + break; + default: + BUG(); + break; + } + } +} + +static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, + unsigned int nr_hp, + unsigned int irq) +{ + struct intc_handle_int key; + + key.irq = irq; + key.handle = 0; + + return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp); +} + +int intc_set_priority(unsigned int irq, unsigned int prio) +{ + struct intc_desc_int *d = get_intc_desc(irq); + struct irq_data *data = irq_get_irq_data(irq); + struct intc_handle_int *ihp; + + if (!intc_get_prio_level(irq) || prio <= 1) + return -EINVAL; + + ihp = intc_find_irq(d->prio, d->nr_prio, irq); + if (ihp) { + if (prio >= (1 << _INTC_WIDTH(ihp->handle))) + return -EINVAL; + + intc_set_prio_level(irq, prio); + + /* + * only set secondary masking method directly + * primary masking method is using intc_prio_level[irq] + * priority level will be set during next enable() + */ + if (_INTC_FN(ihp->handle) != REG_FN_ERR) + _intc_enable(data, ihp->handle); + } + return 0; +} + +#define SENSE_VALID_FLAG 0x80 +#define VALID(x) (x | SENSE_VALID_FLAG) + +static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { + [IRQ_TYPE_EDGE_FALLING] = VALID(0), + [IRQ_TYPE_EDGE_RISING] = VALID(1), + [IRQ_TYPE_LEVEL_LOW] = VALID(2), + /* SH7706, SH7707 and SH7709 do not support high level triggered */ +#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \ + !defined(CONFIG_CPU_SUBTYPE_SH7707) && \ + !defined(CONFIG_CPU_SUBTYPE_SH7709) + [IRQ_TYPE_LEVEL_HIGH] = VALID(3), +#endif +#if defined(CONFIG_ARM) /* all recent SH-Mobile / R-Mobile ARM support this */ + [IRQ_TYPE_EDGE_BOTH] = VALID(4), +#endif +}; + +static int intc_set_type(struct irq_data *data, unsigned int type) +{ + unsigned int irq = data->irq; + struct intc_desc_int *d = get_intc_desc(irq); + unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK]; + struct intc_handle_int *ihp; + unsigned long addr; + + if (!value) + return -EINVAL; + + value &= ~SENSE_VALID_FLAG; + + ihp = intc_find_irq(d->sense, d->nr_sense, irq); + if (ihp) { + /* PINT has 2-bit sense registers, should fail on EDGE_BOTH */ + if (value >= (1 << _INTC_WIDTH(ihp->handle))) + return -EINVAL; + + addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0); + intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value); + } + + return 0; +} + +struct irq_chip intc_irq_chip = { + .irq_mask = intc_disable, + .irq_unmask = intc_enable, + .irq_mask_ack = intc_mask_ack, + .irq_enable = intc_enable, + .irq_disable = 
intc_disable, + .irq_set_type = intc_set_type, +#ifdef CONFIG_SMP + .irq_set_affinity = intc_set_affinity, +#endif + .flags = IRQCHIP_SKIP_SET_WAKE, +}; diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c new file mode 100644 index 000000000..81f22980b --- /dev/null +++ b/drivers/sh/intc/core.c @@ -0,0 +1,511 @@ +/* + * Shared interrupt handling code for IPR and INTC2 types of IRQs. + * + * Copyright (C) 2007, 2008 Magnus Damm + * Copyright (C) 2009 - 2012 Paul Mundt + * + * Based on intc2.c and ipr.c + * + * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi + * Copyright (C) 2000 Kazumoto Kojima + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp> + * Copyright (C) 2005, 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "intc: " fmt + +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/interrupt.h> +#include <linux/sh_intc.h> +#include <linux/irqdomain.h> +#include <linux/device.h> +#include <linux/syscore_ops.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/radix-tree.h> +#include <linux/export.h> +#include <linux/sort.h> +#include "internals.h" + +LIST_HEAD(intc_list); +DEFINE_RAW_SPINLOCK(intc_big_lock); +static unsigned int nr_intc_controllers; + +/* + * Default priority level + * - this needs to be at least 2 for 5-bit priorities on 7780 + */ +static unsigned int default_prio_level = 2; /* 2 - 16 */ +static unsigned int intc_prio_level[INTC_NR_IRQS]; /* for now */ + +unsigned int intc_get_dfl_prio_level(void) +{ + return default_prio_level; +} + +unsigned int intc_get_prio_level(unsigned int irq) +{ + return intc_prio_level[irq]; +} + +void intc_set_prio_level(unsigned int irq, unsigned int level) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&intc_big_lock, flags); + intc_prio_level[irq] = level; + raw_spin_unlock_irqrestore(&intc_big_lock, flags); +} + +static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) +{ + generic_handle_irq((unsigned int)irq_get_handler_data(irq)); +} + +static void __init intc_register_irq(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, + unsigned int irq) +{ + struct intc_handle_int *hp; + struct irq_data *irq_data; + unsigned int data[2], primary; + unsigned long flags; + + raw_spin_lock_irqsave(&intc_big_lock, flags); + radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq)); + raw_spin_unlock_irqrestore(&intc_big_lock, flags); + + /* + * Prefer single interrupt source bitmap over other combinations: + * + * 1. bitmap, single interrupt source + * 2. priority, single interrupt source + * 3. bitmap, multiple interrupt sources (groups) + * 4. priority, multiple interrupt sources (groups) + */ + data[0] = intc_get_mask_handle(desc, d, enum_id, 0); + data[1] = intc_get_prio_handle(desc, d, enum_id, 0); + + primary = 0; + if (!data[0] && data[1]) + primary = 1; + + if (!data[0] && !data[1]) + pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n", + irq, irq2evt(irq)); + + data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1); + data[1] = data[1] ? 
data[1] : intc_get_prio_handle(desc, d, enum_id, 1); + + if (!data[primary]) + primary ^= 1; + + BUG_ON(!data[primary]); /* must have primary masking method */ + + irq_data = irq_get_irq_data(irq); + + disable_irq_nosync(irq); + irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq, + "level"); + irq_set_chip_data(irq, (void *)data[primary]); + + /* + * set priority level + */ + intc_set_prio_level(irq, intc_get_dfl_prio_level()); + + /* enable secondary masking method if present */ + if (data[!primary]) + _intc_enable(irq_data, data[!primary]); + + /* add irq to d->prio list if priority is available */ + if (data[1]) { + hp = d->prio + d->nr_prio; + hp->irq = irq; + hp->handle = data[1]; + + if (primary) { + /* + * only secondary priority should access registers, so + * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() + */ + hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); + hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); + } + d->nr_prio++; + } + + /* add irq to d->sense list if sense is available */ + data[0] = intc_get_sense_handle(desc, d, enum_id); + if (data[0]) { + (d->sense + d->nr_sense)->irq = irq; + (d->sense + d->nr_sense)->handle = data[0]; + d->nr_sense++; + } + + /* irq should be disabled by default */ + d->chip.irq_mask(irq_data); + + intc_set_ack_handle(irq, desc, d, enum_id); + intc_set_dist_handle(irq, desc, d, enum_id); + + activate_irq(irq); +} + +static unsigned int __init save_reg(struct intc_desc_int *d, + unsigned int cnt, + unsigned long value, + unsigned int smp) +{ + if (value) { + value = intc_phys_to_virt(d, value); + + d->reg[cnt] = value; +#ifdef CONFIG_SMP + d->smp[cnt] = smp; +#endif + return 1; + } + + return 0; +} + +int __init register_intc_controller(struct intc_desc *desc) +{ + unsigned int i, k, smp; + struct intc_hw_desc *hw = &desc->hw; + struct intc_desc_int *d; + struct resource *res; + + pr_info("Registered controller '%s' with %u IRQs\n", + desc->name, hw->nr_vectors); + + d = kzalloc(sizeof(*d), GFP_NOWAIT); + if (!d) + goto err0; + + INIT_LIST_HEAD(&d->list); + list_add_tail(&d->list, &intc_list); + + raw_spin_lock_init(&d->lock); + INIT_RADIX_TREE(&d->tree, GFP_ATOMIC); + + d->index = nr_intc_controllers; + + if (desc->num_resources) { + d->nr_windows = desc->num_resources; + d->window = kzalloc(d->nr_windows * sizeof(*d->window), + GFP_NOWAIT); + if (!d->window) + goto err1; + + for (k = 0; k < d->nr_windows; k++) { + res = desc->resource + k; + WARN_ON(resource_type(res) != IORESOURCE_MEM); + d->window[k].phys = res->start; + d->window[k].size = resource_size(res); + d->window[k].virt = ioremap_nocache(res->start, + resource_size(res)); + if (!d->window[k].virt) + goto err2; + } + } + + d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0; +#ifdef CONFIG_INTC_BALANCING + if (d->nr_reg) + d->nr_reg += hw->nr_mask_regs; +#endif + d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; + d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; + d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; + d->nr_reg += hw->subgroups ? 
hw->nr_subgroups : 0; + + d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); + if (!d->reg) + goto err2; + +#ifdef CONFIG_SMP + d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); + if (!d->smp) + goto err3; +#endif + k = 0; + + if (hw->mask_regs) { + for (i = 0; i < hw->nr_mask_regs; i++) { + smp = IS_SMP(hw->mask_regs[i]); + k += save_reg(d, k, hw->mask_regs[i].set_reg, smp); + k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp); +#ifdef CONFIG_INTC_BALANCING + k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0); +#endif + } + } + + if (hw->prio_regs) { + d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio), + GFP_NOWAIT); + if (!d->prio) + goto err4; + + for (i = 0; i < hw->nr_prio_regs; i++) { + smp = IS_SMP(hw->prio_regs[i]); + k += save_reg(d, k, hw->prio_regs[i].set_reg, smp); + k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp); + } + + sort(d->prio, hw->nr_prio_regs, sizeof(*d->prio), + intc_handle_int_cmp, NULL); + } + + if (hw->sense_regs) { + d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense), + GFP_NOWAIT); + if (!d->sense) + goto err5; + + for (i = 0; i < hw->nr_sense_regs; i++) + k += save_reg(d, k, hw->sense_regs[i].reg, 0); + + sort(d->sense, hw->nr_sense_regs, sizeof(*d->sense), + intc_handle_int_cmp, NULL); + } + + if (hw->subgroups) + for (i = 0; i < hw->nr_subgroups; i++) + if (hw->subgroups[i].reg) + k+= save_reg(d, k, hw->subgroups[i].reg, 0); + + memcpy(&d->chip, &intc_irq_chip, sizeof(struct irq_chip)); + d->chip.name = desc->name; + + if (hw->ack_regs) + for (i = 0; i < hw->nr_ack_regs; i++) + k += save_reg(d, k, hw->ack_regs[i].set_reg, 0); + else + d->chip.irq_mask_ack = d->chip.irq_disable; + + /* disable bits matching force_disable before registering irqs */ + if (desc->force_disable) + intc_enable_disable_enum(desc, d, desc->force_disable, 0); + + /* disable bits matching force_enable before registering irqs */ + if (desc->force_enable) + intc_enable_disable_enum(desc, d, desc->force_enable, 0); + + BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ + + intc_irq_domain_init(d, hw); + + /* register the vectors one by one */ + for (i = 0; i < hw->nr_vectors; i++) { + struct intc_vect *vect = hw->vectors + i; + unsigned int irq = evt2irq(vect->vect); + int res; + + if (!vect->enum_id) + continue; + + res = irq_create_identity_mapping(d->domain, irq); + if (unlikely(res)) { + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, irq, irq); + if (unlikely(res)) { + pr_err("domain association failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", irq); + continue; + } + } + + intc_irq_xlate_set(irq, vect->enum_id, d); + intc_register_irq(desc, d, vect->enum_id, irq); + + for (k = i + 1; k < hw->nr_vectors; k++) { + struct intc_vect *vect2 = hw->vectors + k; + unsigned int irq2 = evt2irq(vect2->vect); + + if (vect->enum_id != vect2->enum_id) + continue; + + /* + * In the case of multi-evt handling and sparse + * IRQ support, each vector still needs to have + * its own backing irq_desc. 
+ */ + res = irq_create_identity_mapping(d->domain, irq2); + if (unlikely(res)) { + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, + irq2, irq2); + if (unlikely(res)) { + pr_err("domain association " + "failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", + irq); + continue; + } + } + + vect2->enum_id = 0; + + /* redirect this interrupts to the first one */ + irq_set_chip(irq2, &dummy_irq_chip); + irq_set_chained_handler(irq2, intc_redirect_irq); + irq_set_handler_data(irq2, (void *)irq); + } + } + + intc_subgroup_init(desc, d); + + /* enable bits matching force_enable after registering irqs */ + if (desc->force_enable) + intc_enable_disable_enum(desc, d, desc->force_enable, 1); + + d->skip_suspend = desc->skip_syscore_suspend; + + nr_intc_controllers++; + + return 0; +err5: + kfree(d->prio); +err4: +#ifdef CONFIG_SMP + kfree(d->smp); +err3: +#endif + kfree(d->reg); +err2: + for (k = 0; k < d->nr_windows; k++) + if (d->window[k].virt) + iounmap(d->window[k].virt); + + kfree(d->window); +err1: + kfree(d); +err0: + pr_err("unable to allocate INTC memory\n"); + + return -ENOMEM; +} + +static int intc_suspend(void) +{ + struct intc_desc_int *d; + + list_for_each_entry(d, &intc_list, list) { + int irq; + + if (d->skip_suspend) + continue; + + /* enable wakeup irqs belonging to this intc controller */ + for_each_active_irq(irq) { + struct irq_data *data; + struct irq_chip *chip; + + data = irq_get_irq_data(irq); + chip = irq_data_get_irq_chip(data); + if (chip != &d->chip) + continue; + if (irqd_is_wakeup_set(data)) + chip->irq_enable(data); + } + } + return 0; +} + +static void intc_resume(void) +{ + struct intc_desc_int *d; + + list_for_each_entry(d, &intc_list, list) { + int irq; + + if (d->skip_suspend) + continue; + + for_each_active_irq(irq) { + struct irq_data *data; + struct irq_chip *chip; + + data = irq_get_irq_data(irq); + chip = irq_data_get_irq_chip(data); + /* + * This will catch the redirect and VIRQ cases + * due to the dummy_irq_chip being inserted. + */ + if (chip != &d->chip) + continue; + if (irqd_irq_disabled(data)) + chip->irq_disable(data); + else + chip->irq_enable(data); + } + } +} + +struct syscore_ops intc_syscore_ops = { + .suspend = intc_suspend, + .resume = intc_resume, +}; + +struct bus_type intc_subsys = { + .name = "intc", + .dev_name = "intc", +}; + +static ssize_t +show_intc_name(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct intc_desc_int *d; + + d = container_of(dev, struct intc_desc_int, dev); + + return sprintf(buf, "%s\n", d->chip.name); +} + +static DEVICE_ATTR(name, S_IRUGO, show_intc_name, NULL); + +static int __init register_intc_devs(void) +{ + struct intc_desc_int *d; + int error; + + register_syscore_ops(&intc_syscore_ops); + + error = subsys_system_register(&intc_subsys, NULL); + if (!error) { + list_for_each_entry(d, &intc_list, list) { + d->dev.id = d->index; + d->dev.bus = &intc_subsys; + error = device_register(&d->dev); + if (error == 0) + error = device_create_file(&d->dev, + &dev_attr_name); + if (error) + break; + } + } + + if (error) + pr_err("device registration error\n"); + + return error; +} +device_initcall(register_intc_devs); diff --git a/drivers/sh/intc/handle.c b/drivers/sh/intc/handle.c new file mode 100644 index 000000000..7863a4491 --- /dev/null +++ b/drivers/sh/intc/handle.c @@ -0,0 +1,306 @@ +/* + * Shared interrupt handling code for IPR and INTC2 types of IRQs. 
+ * + * Copyright (C) 2007, 2008 Magnus Damm + * Copyright (C) 2009, 2010 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/spinlock.h> +#include "internals.h" + +static unsigned long ack_handle[INTC_NR_IRQS]; + +static intc_enum __init intc_grp_id(struct intc_desc *desc, + intc_enum enum_id) +{ + struct intc_group *g = desc->hw.groups; + unsigned int i, j; + + for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) { + g = desc->hw.groups + i; + + for (j = 0; g->enum_ids[j]; j++) { + if (g->enum_ids[j] != enum_id) + continue; + + return g->enum_id; + } + } + + return 0; +} + +static unsigned int __init _intc_mask_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, + unsigned int *reg_idx, + unsigned int *fld_idx) +{ + struct intc_mask_reg *mr = desc->hw.mask_regs; + unsigned int fn, mode; + unsigned long reg_e, reg_d; + + while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { + mr = desc->hw.mask_regs + *reg_idx; + + for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { + if (mr->enum_ids[*fld_idx] != enum_id) + continue; + + if (mr->set_reg && mr->clr_reg) { + fn = REG_FN_WRITE_BASE; + mode = MODE_DUAL_REG; + reg_e = mr->clr_reg; + reg_d = mr->set_reg; + } else { + fn = REG_FN_MODIFY_BASE; + if (mr->set_reg) { + mode = MODE_ENABLE_REG; + reg_e = mr->set_reg; + reg_d = mr->set_reg; + } else { + mode = MODE_MASK_REG; + reg_e = mr->clr_reg; + reg_d = mr->clr_reg; + } + } + + fn += (mr->reg_width >> 3) - 1; + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + 1, + (mr->reg_width - 1) - *fld_idx); + } + + *fld_idx = 0; + (*reg_idx)++; + } + + return 0; +} + +unsigned int __init +intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d, + intc_enum enum_id, int do_grps) +{ + unsigned int i = 0; + unsigned int j = 0; + unsigned int ret; + + ret = _intc_mask_data(desc, d, enum_id, &i, &j); + if (ret) + return ret; + + if (do_grps) + return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0); + + return 0; +} + +static unsigned int __init _intc_prio_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, + unsigned int *reg_idx, + unsigned int *fld_idx) +{ + struct intc_prio_reg *pr = desc->hw.prio_regs; + unsigned int fn, n, mode, bit; + unsigned long reg_e, reg_d; + + while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { + pr = desc->hw.prio_regs + *reg_idx; + + for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) { + if (pr->enum_ids[*fld_idx] != enum_id) + continue; + + if (pr->set_reg && pr->clr_reg) { + fn = REG_FN_WRITE_BASE; + mode = MODE_PCLR_REG; + reg_e = pr->set_reg; + reg_d = pr->clr_reg; + } else { + fn = REG_FN_MODIFY_BASE; + mode = MODE_PRIO_REG; + if (!pr->set_reg) + BUG(); + reg_e = pr->set_reg; + reg_d = pr->set_reg; + } + + fn += (pr->reg_width >> 3) - 1; + n = *fld_idx + 1; + + BUG_ON(n * pr->field_width > pr->reg_width); + + bit = pr->reg_width - (n * pr->field_width); + + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + pr->field_width, bit); + } + + *fld_idx = 0; + (*reg_idx)++; + } + + return 0; +} + +unsigned int __init +intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d, + intc_enum enum_id, int do_grps) +{ + unsigned int i = 0; + unsigned int j = 0; + unsigned int ret; + + ret = 
_intc_prio_data(desc, d, enum_id, &i, &j); + if (ret) + return ret; + + if (do_grps) + return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0); + + return 0; +} + +static unsigned int intc_ack_data(struct intc_desc *desc, + struct intc_desc_int *d, intc_enum enum_id) +{ + struct intc_mask_reg *mr = desc->hw.ack_regs; + unsigned int i, j, fn, mode; + unsigned long reg_e, reg_d; + + for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) { + mr = desc->hw.ack_regs + i; + + for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { + if (mr->enum_ids[j] != enum_id) + continue; + + fn = REG_FN_MODIFY_BASE; + mode = MODE_ENABLE_REG; + reg_e = mr->set_reg; + reg_d = mr->set_reg; + + fn += (mr->reg_width >> 3) - 1; + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + 1, + (mr->reg_width - 1) - j); + } + } + + return 0; +} + +static void intc_enable_disable(struct intc_desc_int *d, + unsigned long handle, int do_enable) +{ + unsigned long addr; + unsigned int cpu; + unsigned long (*fn)(unsigned long, unsigned long, + unsigned long (*)(unsigned long, unsigned long, + unsigned long), + unsigned int); + + if (do_enable) { + for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { + addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); + fn = intc_enable_noprio_fns[_INTC_MODE(handle)]; + fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); + } + } else { + for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { + addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); + fn = intc_disable_fns[_INTC_MODE(handle)]; + fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); + } + } +} + +void __init intc_enable_disable_enum(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, int enable) +{ + unsigned int i, j, data; + + /* go through and enable/disable all mask bits */ + i = j = 0; + do { + data = _intc_mask_data(desc, d, enum_id, &i, &j); + if (data) + intc_enable_disable(d, data, enable); + j++; + } while (data); + + /* go through and enable/disable all priority fields */ + i = j = 0; + do { + data = _intc_prio_data(desc, d, enum_id, &i, &j); + if (data) + intc_enable_disable(d, data, enable); + + j++; + } while (data); +} + +unsigned int __init +intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d, + intc_enum enum_id) +{ + struct intc_sense_reg *sr = desc->hw.sense_regs; + unsigned int i, j, fn, bit; + + for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) { + sr = desc->hw.sense_regs + i; + + for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { + if (sr->enum_ids[j] != enum_id) + continue; + + fn = REG_FN_MODIFY_BASE; + fn += (sr->reg_width >> 3) - 1; + + BUG_ON((j + 1) * sr->field_width > sr->reg_width); + + bit = sr->reg_width - ((j + 1) * sr->field_width); + + return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg), + 0, sr->field_width, bit); + } + } + + return 0; +} + + +void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc, + struct intc_desc_int *d, intc_enum id) +{ + unsigned long flags; + + /* + * Nothing to do for this IRQ. 
+ */ + if (!desc->hw.ack_regs) + return; + + raw_spin_lock_irqsave(&intc_big_lock, flags); + ack_handle[irq] = intc_ack_data(desc, d, id); + raw_spin_unlock_irqrestore(&intc_big_lock, flags); +} + +unsigned long intc_get_ack_handle(unsigned int irq) +{ + return ack_handle[irq]; +} diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h new file mode 100644 index 000000000..7dff08e2a --- /dev/null +++ b/drivers/sh/intc/internals.h @@ -0,0 +1,198 @@ +#include <linux/sh_intc.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/radix-tree.h> +#include <linux/device.h> + +#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ + ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ + ((addr_e) << 16) | ((addr_d << 24))) + +#define _INTC_SHIFT(h) (h & 0x1f) +#define _INTC_WIDTH(h) ((h >> 5) & 0xf) +#define _INTC_FN(h) ((h >> 9) & 0xf) +#define _INTC_MODE(h) ((h >> 13) & 0x7) +#define _INTC_ADDR_E(h) ((h >> 16) & 0xff) +#define _INTC_ADDR_D(h) ((h >> 24) & 0xff) + +#ifdef CONFIG_SMP +#define IS_SMP(x) (x.smp) +#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) +#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1) +#else +#define IS_SMP(x) 0 +#define INTC_REG(d, x, c) (d->reg[(x)]) +#define SMP_NR(d, x) 1 +#endif + +struct intc_handle_int { + unsigned int irq; + unsigned long handle; +}; + +struct intc_window { + phys_addr_t phys; + void __iomem *virt; + unsigned long size; +}; + +struct intc_map_entry { + intc_enum enum_id; + struct intc_desc_int *desc; +}; + +struct intc_subgroup_entry { + unsigned int pirq; + intc_enum enum_id; + unsigned long handle; +}; + +struct intc_desc_int { + struct list_head list; + struct device dev; + struct radix_tree_root tree; + raw_spinlock_t lock; + unsigned int index; + unsigned long *reg; +#ifdef CONFIG_SMP + unsigned long *smp; +#endif + unsigned int nr_reg; + struct intc_handle_int *prio; + unsigned int nr_prio; + struct intc_handle_int *sense; + unsigned int nr_sense; + struct intc_window *window; + unsigned int nr_windows; + struct irq_domain *domain; + struct irq_chip chip; + bool skip_suspend; +}; + + +enum { + REG_FN_ERR = 0, + REG_FN_TEST_BASE = 1, + REG_FN_WRITE_BASE = 5, + REG_FN_MODIFY_BASE = 9 +}; + +enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */ + MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */ + MODE_DUAL_REG, /* Two registers, set bit to enable / disable */ + MODE_PRIO_REG, /* Priority value written to enable interrupt */ + MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */ +}; + +static inline struct intc_desc_int *get_intc_desc(unsigned int irq) +{ + struct irq_chip *chip = irq_get_chip(irq); + + return container_of(chip, struct intc_desc_int, chip); +} + +/* + * Grumble. + */ +static inline void activate_irq(int irq) +{ +#ifdef CONFIG_ARM + /* ARM requires an extra step to clear IRQ_NOREQUEST, which it + * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. 
+ */ + set_irq_flags(irq, IRQF_VALID); +#else + /* same effect on other architectures */ + irq_set_noprobe(irq); +#endif +} + +static inline int intc_handle_int_cmp(const void *a, const void *b) +{ + const struct intc_handle_int *_a = a; + const struct intc_handle_int *_b = b; + + return _a->irq - _b->irq; +} + +/* access.c */ +extern unsigned long +(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data); + +extern unsigned long +(*intc_enable_fns[])(unsigned long addr, unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, unsigned long), + unsigned int irq); +extern unsigned long +(*intc_disable_fns[])(unsigned long addr, unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, unsigned long), + unsigned int irq); +extern unsigned long +(*intc_enable_noprio_fns[])(unsigned long addr, unsigned long handle, + unsigned long (*fn)(unsigned long, + unsigned long, unsigned long), + unsigned int irq); + +unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address); +unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address); +unsigned int intc_set_field_from_handle(unsigned int value, + unsigned int field_value, + unsigned int handle); +unsigned long intc_get_field_from_handle(unsigned int value, + unsigned int handle); + +/* balancing.c */ +#ifdef CONFIG_INTC_BALANCING +void intc_balancing_enable(unsigned int irq); +void intc_balancing_disable(unsigned int irq); +void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc, + struct intc_desc_int *d, intc_enum id); +#else +static inline void intc_balancing_enable(unsigned int irq) { } +static inline void intc_balancing_disable(unsigned int irq) { } +static inline void +intc_set_dist_handle(unsigned int irq, struct intc_desc *desc, + struct intc_desc_int *d, intc_enum id) { } +#endif + +/* chip.c */ +extern struct irq_chip intc_irq_chip; +void _intc_enable(struct irq_data *data, unsigned long handle); + +/* core.c */ +extern struct list_head intc_list; +extern raw_spinlock_t intc_big_lock; +extern struct bus_type intc_subsys; + +unsigned int intc_get_dfl_prio_level(void); +unsigned int intc_get_prio_level(unsigned int irq); +void intc_set_prio_level(unsigned int irq, unsigned int level); + +/* handle.c */ +unsigned int intc_get_mask_handle(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, int do_grps); +unsigned int intc_get_prio_handle(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, int do_grps); +unsigned int intc_get_sense_handle(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id); +void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc, + struct intc_desc_int *d, intc_enum id); +unsigned long intc_get_ack_handle(unsigned int irq); +void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d, + intc_enum enum_id, int enable); + +/* irqdomain.c */ +void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw); + +/* virq.c */ +void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d); +void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d); +struct intc_map_entry *intc_irq_xlate_get(unsigned int irq); diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c new file mode 100644 index 000000000..3968f1c3c --- /dev/null +++ b/drivers/sh/intc/irqdomain.c @@ -0,0 +1,68 @@ +/* + * IRQ domain support for SH INTC subsystem + * + * Copyright (C) 2012 Paul Mundt + * + * This file is subject to 
the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "intc: " fmt + +#include <linux/irqdomain.h> +#include <linux/sh_intc.h> +#include <linux/export.h> +#include "internals.h" + +/** + * intc_irq_domain_evt_xlate() - Generic xlate for vectored IRQs. + * + * This takes care of exception vector to hwirq translation through + * by way of evt2irq() translation. + * + * Note: For platforms that use a flat vector space without INTEVT this + * basically just mimics irq_domain_xlate_onecell() by way of a nopped + * out evt2irq() implementation. + */ +static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (WARN_ON(intsize < 1)) + return -EINVAL; + + *out_hwirq = evt2irq(intspec[0]); + *out_type = IRQ_TYPE_NONE; + + return 0; +} + +static const struct irq_domain_ops intc_evt_ops = { + .xlate = intc_evt_xlate, +}; + +void __init intc_irq_domain_init(struct intc_desc_int *d, + struct intc_hw_desc *hw) +{ + unsigned int irq_base, irq_end; + + /* + * Quick linear revmap check + */ + irq_base = evt2irq(hw->vectors[0].vect); + irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect); + + /* + * Linear domains have a hard-wired assertion that IRQs start at + * 0 in order to make some performance optimizations. Lamely + * restrict the linear case to these conditions here, taking the + * tree penalty for linear cases with non-zero hwirq bases. + */ + if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1)) + d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, + &intc_evt_ops, NULL); + else + d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); + + BUG_ON(!d->domain); +} diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c new file mode 100644 index 000000000..e649ceaaa --- /dev/null +++ b/drivers/sh/intc/userimask.c @@ -0,0 +1,84 @@ +/* + * Support for hardware-assisted userspace interrupt masking. + * + * Copyright (C) 2010 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "intc: " fmt + +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/stat.h> +#include <asm/sizes.h> +#include "internals.h" + +static void __iomem *uimask; + +static ssize_t +show_intc_userimask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf); +} + +static ssize_t +store_intc_userimask(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long level; + + level = simple_strtoul(buf, NULL, 10); + + /* + * Minimal acceptable IRQ levels are in the 2 - 16 range, but + * these are chomped so as to not interfere with normal IRQs. + * + * Level 1 is a special case on some CPUs in that it's not + * directly settable, but given that USERIMASK cuts off below a + * certain level, we don't care about this limitation here. + * Level 0 on the other hand equates to user masking disabled. + * + * We use the default priority level as a cut off so that only + * special case opt-in IRQs can be mangled. 
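+ * + * The new level is written below together with the 0xa5 key expected + * in the upper byte of the USERIMASK register.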
+ */ + if (level >= intc_get_dfl_prio_level()) + return -EINVAL; + + __raw_writel(0xa5 << 24 | level << 4, uimask); + + return count; +} + +static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR, + show_intc_userimask, store_intc_userimask); + + +static int __init userimask_sysdev_init(void) +{ + if (unlikely(!uimask)) + return -ENXIO; + + return device_create_file(intc_subsys.dev_root, &dev_attr_userimask); +} +late_initcall(userimask_sysdev_init); + +int register_intc_userimask(unsigned long addr) +{ + if (unlikely(uimask)) + return -EBUSY; + + uimask = ioremap_nocache(addr, SZ_4K); + if (unlikely(!uimask)) + return -ENOMEM; + + pr_info("userimask support registered for levels 0 -> %d\n", + intc_get_dfl_prio_level() - 1); + + return 0; +} diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c new file mode 100644 index 000000000..9e62ba931 --- /dev/null +++ b/drivers/sh/intc/virq-debugfs.c @@ -0,0 +1,64 @@ +/* + * Support for virtual IRQ subgroups debugfs mapping. + * + * Copyright (C) 2010 Paul Mundt + * + * Modelled after arch/powerpc/kernel/irq.c. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/seq_file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/debugfs.h> +#include "internals.h" + +static int intc_irq_xlate_debug(struct seq_file *m, void *priv) +{ + int i; + + seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name"); + + for (i = 1; i < nr_irqs; i++) { + struct intc_map_entry *entry = intc_irq_xlate_get(i); + struct intc_desc_int *desc = entry->desc; + + if (!desc) + continue; + + seq_printf(m, "%5d ", i); + seq_printf(m, "0x%05x ", entry->enum_id); + seq_printf(m, "%-15s\n", desc->chip.name); + } + + return 0; +} + +static int intc_irq_xlate_open(struct inode *inode, struct file *file) +{ + return single_open(file, intc_irq_xlate_debug, inode->i_private); +} + +static const struct file_operations intc_irq_xlate_fops = { + .open = intc_irq_xlate_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init intc_irq_xlate_init(void) +{ + /* + * XXX.. use arch_debugfs_dir here when all of the intc users are + * converted. + */ + if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL, + &intc_irq_xlate_fops) == NULL) + return -ENOMEM; + + return 0; +} +fs_initcall(intc_irq_xlate_init); diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c new file mode 100644 index 000000000..f30ac9354 --- /dev/null +++ b/drivers/sh/intc/virq.c @@ -0,0 +1,265 @@ +/* + * Support for virtual IRQ subgroups. + * + * Copyright (C) 2010 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#define pr_fmt(fmt) "intc: " fmt + +#include <linux/slab.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/radix-tree.h> +#include <linux/spinlock.h> +#include <linux/export.h> +#include "internals.h" + +static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS]; + +struct intc_virq_list { + unsigned int irq; + struct intc_virq_list *next; +}; + +#define for_each_virq(entry, head) \ + for (entry = head; entry; entry = entry->next) + +/* + * Tags for the radix tree + */ +#define INTC_TAG_VIRQ_NEEDS_ALLOC 0 + +void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&intc_big_lock, flags); + intc_irq_xlate[irq].enum_id = id; + intc_irq_xlate[irq].desc = d; + raw_spin_unlock_irqrestore(&intc_big_lock, flags); +} + +struct intc_map_entry *intc_irq_xlate_get(unsigned int irq) +{ + return intc_irq_xlate + irq; +} + +int intc_irq_lookup(const char *chipname, intc_enum enum_id) +{ + struct intc_map_entry *ptr; + struct intc_desc_int *d; + int irq = -1; + + list_for_each_entry(d, &intc_list, list) { + int tagged; + + if (strcmp(d->chip.name, chipname) != 0) + continue; + + /* + * Catch early lookups for subgroup VIRQs that have not + * yet been allocated an IRQ. This already includes a + * fast-path out if the tree is untagged, so there is no + * need to explicitly test the root tree. + */ + tagged = radix_tree_tag_get(&d->tree, enum_id, + INTC_TAG_VIRQ_NEEDS_ALLOC); + if (unlikely(tagged)) + break; + + ptr = radix_tree_lookup(&d->tree, enum_id); + if (ptr) { + irq = ptr - intc_irq_xlate; + break; + } + } + + return irq; +} +EXPORT_SYMBOL_GPL(intc_irq_lookup); + +static int add_virq_to_pirq(unsigned int irq, unsigned int virq) +{ + struct intc_virq_list **last, *entry; + struct irq_data *data = irq_get_irq_data(irq); + + /* scan for duplicates */ + last = (struct intc_virq_list **)&data->handler_data; + for_each_virq(entry, data->handler_data) { + if (entry->irq == virq) + return 0; + last = &entry->next; + } + + entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC); + if (!entry) { + pr_err("can't allocate VIRQ mapping for %d\n", virq); + return -ENOMEM; + } + + entry->irq = virq; + + *last = entry; + + return 0; +} + +static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct irq_data *data = irq_get_irq_data(irq); + struct irq_chip *chip = irq_data_get_irq_chip(data); + struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); + struct intc_desc_int *d = get_intc_desc(irq); + + chip->irq_mask_ack(data); + + for_each_virq(entry, vlist) { + unsigned long addr, handle; + + handle = (unsigned long)irq_get_handler_data(entry->irq); + addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); + + if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) + generic_handle_irq(entry->irq); + } + + chip->irq_unmask(data); +} + +static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup, + struct intc_desc_int *d, + unsigned int index) +{ + unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1; + + return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg), + 0, 1, (subgroup->reg_width - 1) - index); +} + +static void __init intc_subgroup_init_one(struct intc_desc *desc, + struct intc_desc_int *d, + struct intc_subgroup *subgroup) +{ + struct intc_map_entry *mapped; + unsigned int pirq; + unsigned long flags; + int i; + + mapped = radix_tree_lookup(&d->tree, subgroup->parent_id); + if (!mapped) { + WARN_ON(1); + return; + } + + pirq = 
mapped - intc_irq_xlate; + + raw_spin_lock_irqsave(&d->lock, flags); + + for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) { + struct intc_subgroup_entry *entry; + int err; + + if (!subgroup->enum_ids[i]) + continue; + + entry = kmalloc(sizeof(*entry), GFP_NOWAIT); + if (!entry) + break; + + entry->pirq = pirq; + entry->enum_id = subgroup->enum_ids[i]; + entry->handle = intc_subgroup_data(subgroup, d, i); + + err = radix_tree_insert(&d->tree, entry->enum_id, entry); + if (unlikely(err < 0)) + break; + + radix_tree_tag_set(&d->tree, entry->enum_id, + INTC_TAG_VIRQ_NEEDS_ALLOC); + } + + raw_spin_unlock_irqrestore(&d->lock, flags); +} + +void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d) +{ + int i; + + if (!desc->hw.subgroups) + return; + + for (i = 0; i < desc->hw.nr_subgroups; i++) + intc_subgroup_init_one(desc, d, desc->hw.subgroups + i); +} + +static void __init intc_subgroup_map(struct intc_desc_int *d) +{ + struct intc_subgroup_entry *entries[32]; + unsigned long flags; + unsigned int nr_found; + int i; + + raw_spin_lock_irqsave(&d->lock, flags); + +restart: + nr_found = radix_tree_gang_lookup_tag_slot(&d->tree, + (void ***)entries, 0, ARRAY_SIZE(entries), + INTC_TAG_VIRQ_NEEDS_ALLOC); + + for (i = 0; i < nr_found; i++) { + struct intc_subgroup_entry *entry; + int irq; + + entry = radix_tree_deref_slot((void **)entries[i]); + if (unlikely(!entry)) + continue; + if (radix_tree_deref_retry(entry)) + goto restart; + + irq = irq_alloc_desc(numa_node_id()); + if (unlikely(irq < 0)) { + pr_err("no more free IRQs, bailing..\n"); + break; + } + + activate_irq(irq); + + pr_info("Setting up a chained VIRQ from %d -> %d\n", + irq, entry->pirq); + + intc_irq_xlate_set(irq, entry->enum_id, d); + + irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), + handle_simple_irq, "virq"); + irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); + + irq_set_handler_data(irq, (void *)entry->handle); + + /* + * Set the virtual IRQ as non-threadable. + */ + irq_set_nothread(irq); + + irq_set_chained_handler(entry->pirq, intc_virq_handler); + add_virq_to_pirq(entry->pirq, irq); + + radix_tree_tag_clear(&d->tree, entry->enum_id, + INTC_TAG_VIRQ_NEEDS_ALLOC); + radix_tree_replace_slot((void **)entries[i], + &intc_irq_xlate[irq]); + } + + raw_spin_unlock_irqrestore(&d->lock, flags); +} + +void __init intc_finalize(void) +{ + struct intc_desc_int *d; + + list_for_each_entry(d, &intc_list, list) + if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC)) + intc_subgroup_map(d); +} diff --git a/drivers/sh/maple/Makefile b/drivers/sh/maple/Makefile new file mode 100644 index 000000000..65dfeeb61 --- /dev/null +++ b/drivers/sh/maple/Makefile @@ -0,0 +1,3 @@ +# Makefile for Maple Bus + +obj-$(CONFIG_MAPLE) := maple.o diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c new file mode 100644 index 000000000..bec81c240 --- /dev/null +++ b/drivers/sh/maple/maple.c @@ -0,0 +1,893 @@ +/* + * Core maple bus functionality + * + * Copyright (C) 2007 - 2009 Adrian McMenamin + * Copyright (C) 2001 - 2008 Paul Mundt + * Copyright (C) 2000 - 2001 YAEGASHI Takeshi + * Copyright (C) 2001 M. R. Brown + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/maple.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <asm/cacheflush.h> +#include <asm/dma.h> +#include <asm/io.h> +#include <mach/dma.h> +#include <mach/sysasic.h> + +MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); +MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); +MODULE_LICENSE("GPL v2"); +MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); + +static void maple_dma_handler(struct work_struct *work); +static void maple_vblank_handler(struct work_struct *work); + +static DECLARE_WORK(maple_dma_process, maple_dma_handler); +static DECLARE_WORK(maple_vblank_process, maple_vblank_handler); + +static LIST_HEAD(maple_waitq); +static LIST_HEAD(maple_sentq); + +/* mutex to protect queue of waiting packets */ +static DEFINE_MUTEX(maple_wlist_lock); + +static struct maple_driver maple_unsupported_device; +static struct device maple_bus; +static int subdevice_map[MAPLE_PORTS]; +static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; +static unsigned long maple_pnp_time; +static int started, scanning, fullscan; +static struct kmem_cache *maple_queue_cache; + +struct maple_device_specify { + int port; + int unit; +}; + +static bool checked[MAPLE_PORTS]; +static bool empty[MAPLE_PORTS]; +static struct maple_device *baseunits[MAPLE_PORTS]; + +/** + * maple_driver_register - register a maple driver + * @drv: maple driver to be registered. + * + * Registers the passed in @drv, while updating the bus type. + * Devices with matching function IDs will be automatically probed. + */ +int maple_driver_register(struct maple_driver *drv) +{ + if (!drv) + return -EINVAL; + + drv->drv.bus = &maple_bus_type; + + return driver_register(&drv->drv); +} +EXPORT_SYMBOL_GPL(maple_driver_register); + +/** + * maple_driver_unregister - unregister a maple driver. + * @drv: maple driver to unregister. + * + * Cleans up after maple_driver_register(). To be invoked in the exit + * path of any module drivers. 
+ */ +void maple_driver_unregister(struct maple_driver *drv) +{ + driver_unregister(&drv->drv); +} +EXPORT_SYMBOL_GPL(maple_driver_unregister); + +/* set hardware registers to enable next round of dma */ +static void maple_dma_reset(void) +{ + __raw_writel(MAPLE_MAGIC, MAPLE_RESET); + /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ + __raw_writel(1, MAPLE_TRIGTYPE); + /* + * Maple system register + * bits 31 - 16 timeout in units of 20nsec + * bit 12 hard trigger - set 0 to keep responding to VBLANK + * bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps + * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA + * max delay is 11 + */ + __raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); + __raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR); + __raw_writel(1, MAPLE_ENABLE); +} + +/** + * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND + * @dev: device responding + * @callback: handler callback + * @interval: interval in jiffies between callbacks + * @function: the function code for the device + */ +void maple_getcond_callback(struct maple_device *dev, + void (*callback) (struct mapleq *mq), + unsigned long interval, unsigned long function) +{ + dev->callback = callback; + dev->interval = interval; + dev->function = cpu_to_be32(function); + dev->when = jiffies; +} +EXPORT_SYMBOL_GPL(maple_getcond_callback); + +static int maple_dma_done(void) +{ + return (__raw_readl(MAPLE_STATE) & 1) == 0; +} + +static void maple_release_device(struct device *dev) +{ + struct maple_device *mdev; + struct mapleq *mq; + + mdev = to_maple_dev(dev); + mq = mdev->mq; + kmem_cache_free(maple_queue_cache, mq->recvbuf); + kfree(mq); + kfree(mdev); +} + +/** + * maple_add_packet - add a single instruction to the maple bus queue + * @mdev: maple device + * @function: function on device being queried + * @command: maple command to add + * @length: length of command string (in 32 bit words) + * @data: remainder of command string + */ +int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, + size_t length, void *data) +{ + int ret = 0; + void *sendbuf = NULL; + + if (length) { + sendbuf = kzalloc(length * 4, GFP_KERNEL); + if (!sendbuf) { + ret = -ENOMEM; + goto out; + } + ((__be32 *)sendbuf)[0] = cpu_to_be32(function); + } + + mdev->mq->command = command; + mdev->mq->length = length; + if (length > 1) + memcpy(sendbuf + 4, data, (length - 1) * 4); + mdev->mq->sendbuf = sendbuf; + + mutex_lock(&maple_wlist_lock); + list_add_tail(&mdev->mq->list, &maple_waitq); + mutex_unlock(&maple_wlist_lock); +out: + return ret; +} +EXPORT_SYMBOL_GPL(maple_add_packet); + +static struct mapleq *maple_allocq(struct maple_device *mdev) +{ + struct mapleq *mq; + + mq = kzalloc(sizeof(*mq), GFP_KERNEL); + if (!mq) + goto failed_nomem; + + INIT_LIST_HEAD(&mq->list); + mq->dev = mdev; + mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); + if (!mq->recvbuf) + goto failed_p2; + mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]); + + return mq; + +failed_p2: + kfree(mq); +failed_nomem: + dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n", + mdev->port, mdev->unit); + return NULL; +} + +static struct maple_device *maple_alloc_dev(int port, int unit) +{ + struct maple_device *mdev; + + /* zero this out to avoid kobj subsystem + * thinking it has already been registered */ + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return NULL; + + mdev->port = port; + mdev->unit = unit; + + mdev->mq = maple_allocq(mdev); + + if (!mdev->mq) { + 
kfree(mdev); + return NULL; + } + mdev->dev.bus = &maple_bus_type; + mdev->dev.parent = &maple_bus; + init_waitqueue_head(&mdev->maple_wait); + return mdev; +} + +static void maple_free_dev(struct maple_device *mdev) +{ + kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf); + kfree(mdev->mq); + kfree(mdev); +} + +/* process the command queue into a maple command block + * terminating command has bit 32 of first long set to 0 + */ +static void maple_build_block(struct mapleq *mq) +{ + int port, unit, from, to, len; + unsigned long *lsendbuf = mq->sendbuf; + + port = mq->dev->port & 3; + unit = mq->dev->unit; + len = mq->length; + from = port << 6; + to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20); + + *maple_lastptr &= 0x7fffffff; + maple_lastptr = maple_sendptr; + + *maple_sendptr++ = (port << 16) | len | 0x80000000; + *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf); + *maple_sendptr++ = + mq->command | (to << 8) | (from << 16) | (len << 24); + while (len-- > 0) + *maple_sendptr++ = *lsendbuf++; +} + +/* build up command queue */ +static void maple_send(void) +{ + int i, maple_packets = 0; + struct mapleq *mq, *nmq; + + if (!maple_dma_done()) + return; + + /* disable DMA */ + __raw_writel(0, MAPLE_ENABLE); + + if (!list_empty(&maple_sentq)) + goto finish; + + mutex_lock(&maple_wlist_lock); + if (list_empty(&maple_waitq)) { + mutex_unlock(&maple_wlist_lock); + goto finish; + } + + maple_lastptr = maple_sendbuf; + maple_sendptr = maple_sendbuf; + + list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { + maple_build_block(mq); + list_del_init(&mq->list); + list_add_tail(&mq->list, &maple_sentq); + if (maple_packets++ > MAPLE_MAXPACKETS) + break; + } + mutex_unlock(&maple_wlist_lock); + if (maple_packets > 0) { + for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) + dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, + PAGE_SIZE, DMA_BIDIRECTIONAL); + } + +finish: + maple_dma_reset(); +} + +/* check if there is a driver registered likely to match this device */ +static int maple_check_matching_driver(struct device_driver *driver, + void *devptr) +{ + struct maple_driver *maple_drv; + struct maple_device *mdev; + + mdev = devptr; + maple_drv = to_maple_driver(driver); + if (mdev->devinfo.function & cpu_to_be32(maple_drv->function)) + return 1; + return 0; +} + +static void maple_detach_driver(struct maple_device *mdev) +{ + device_unregister(&mdev->dev); +} + +/* process initial MAPLE_COMMAND_DEVINFO for each device or port */ +static void maple_attach_driver(struct maple_device *mdev) +{ + char *p, *recvbuf; + unsigned long function; + int matched, error; + + recvbuf = mdev->mq->recvbuf->buf; + /* copy the data as individual elements in + * case of memory optimisation */ + memcpy(&mdev->devinfo.function, recvbuf + 4, 4); + memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12); + memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); + memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); + memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); + memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); + memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); + memcpy(mdev->product_name, mdev->devinfo.product_name, 30); + mdev->product_name[30] = '\0'; + memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60); + mdev->product_licence[60] = '\0'; + + for (p = mdev->product_name + 29; mdev->product_name <= p; p--) + if (*p == ' ') + *p = '\0'; + else + break; + for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--) + if (*p == ' ') + *p = '\0'; + else + 
break; + + function = be32_to_cpu(mdev->devinfo.function); + + dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n", + mdev->product_name, function, mdev->port, mdev->unit); + + if (function > 0x200) { + /* Do this silently - as not a real device */ + function = 0; + mdev->driver = &maple_unsupported_device; + dev_set_name(&mdev->dev, "%d:0.port", mdev->port); + } else { + matched = + bus_for_each_drv(&maple_bus_type, NULL, mdev, + maple_check_matching_driver); + + if (matched == 0) { + /* Driver does not exist yet */ + dev_info(&mdev->dev, "no driver found\n"); + mdev->driver = &maple_unsupported_device; + } + dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port, + mdev->unit, function); + } + + mdev->function = function; + mdev->dev.release = &maple_release_device; + + atomic_set(&mdev->busy, 0); + error = device_register(&mdev->dev); + if (error) { + dev_warn(&mdev->dev, "could not register device at" + " (%d, %d), with error 0x%X\n", mdev->unit, + mdev->port, error); + maple_free_dev(mdev); + mdev = NULL; + return; + } +} + +/* + * if device has been registered for the given + * port and unit then return 1 - allows identification + * of which devices need to be attached or detached + */ +static int check_maple_device(struct device *device, void *portptr) +{ + struct maple_device_specify *ds; + struct maple_device *mdev; + + ds = portptr; + mdev = to_maple_dev(device); + if (mdev->port == ds->port && mdev->unit == ds->unit) + return 1; + return 0; +} + +static int setup_maple_commands(struct device *device, void *ignored) +{ + int add; + struct maple_device *mdev = to_maple_dev(device); + if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 && + time_after(jiffies, mdev->when)) { + /* bounce if we cannot add */ + add = maple_add_packet(mdev, + be32_to_cpu(mdev->devinfo.function), + MAPLE_COMMAND_GETCOND, 1, NULL); + if (!add) + mdev->when = jiffies + mdev->interval; + } else { + if (time_after(jiffies, maple_pnp_time)) + /* Ensure we don't have block reads and devinfo + * calls interfering with one another - so flag the + * device as busy */ + if (atomic_read(&mdev->busy) == 0) { + atomic_set(&mdev->busy, 1); + maple_add_packet(mdev, 0, + MAPLE_COMMAND_DEVINFO, 0, NULL); + } + } + return 0; +} + +/* VBLANK bottom half - implemented via workqueue */ +static void maple_vblank_handler(struct work_struct *work) +{ + int x, locking; + struct maple_device *mdev; + + if (!maple_dma_done()) + return; + + __raw_writel(0, MAPLE_ENABLE); + + if (!list_empty(&maple_sentq)) + goto finish; + + /* + * Set up essential commands - to fetch data and + * check devices are still present + */ + bus_for_each_dev(&maple_bus_type, NULL, NULL, + setup_maple_commands); + + if (time_after(jiffies, maple_pnp_time)) { + /* + * Scan the empty ports - bus is flakey and may have + * mis-reported emptyness + */ + for (x = 0; x < MAPLE_PORTS; x++) { + if (checked[x] && empty[x]) { + mdev = baseunits[x]; + if (!mdev) + break; + atomic_set(&mdev->busy, 1); + locking = maple_add_packet(mdev, 0, + MAPLE_COMMAND_DEVINFO, 0, NULL); + if (!locking) + break; + } + } + + maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; + } + +finish: + maple_send(); +} + +/* handle devices added via hotplugs - placing them on queue for DEVINFO */ +static void maple_map_subunits(struct maple_device *mdev, int submask) +{ + int retval, k, devcheck; + struct maple_device *mdev_add; + struct maple_device_specify ds; + + ds.port = mdev->port; + for (k = 0; k < 5; k++) { + ds.unit = k + 1; + retval = + bus_for_each_dev(&maple_bus_type, 
NULL, &ds, + check_maple_device); + if (retval) { + submask = submask >> 1; + continue; + } + devcheck = submask & 0x01; + if (devcheck) { + mdev_add = maple_alloc_dev(mdev->port, k + 1); + if (!mdev_add) + return; + atomic_set(&mdev_add->busy, 1); + maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, + 0, NULL); + /* mark that we are checking sub devices */ + scanning = 1; + } + submask = submask >> 1; + } +} + +/* mark a device as removed */ +static void maple_clean_submap(struct maple_device *mdev) +{ + int killbit; + + killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20); + killbit = ~killbit; + killbit &= 0xFF; + subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit; +} + +/* handle empty port or hotplug removal */ +static void maple_response_none(struct maple_device *mdev) +{ + maple_clean_submap(mdev); + + if (likely(mdev->unit != 0)) { + /* + * Block devices play up + * and give the impression they have + * been removed even when still in place or + * trip the mtd layer when they have + * really gone - this code traps that eventuality + * and ensures we aren't overloaded with useless + * error messages + */ + if (mdev->can_unload) { + if (!mdev->can_unload(mdev)) { + atomic_set(&mdev->busy, 2); + wake_up(&mdev->maple_wait); + return; + } + } + + dev_info(&mdev->dev, "detaching device at (%d, %d)\n", + mdev->port, mdev->unit); + maple_detach_driver(mdev); + return; + } else { + if (!started || !fullscan) { + if (checked[mdev->port] == false) { + checked[mdev->port] = true; + empty[mdev->port] = true; + dev_info(&mdev->dev, "no devices" + " to port %d\n", mdev->port); + } + return; + } + } + /* Some hardware devices generate false detach messages on unit 0 */ + atomic_set(&mdev->busy, 0); +} + +/* preprocess hotplugs or scans */ +static void maple_response_devinfo(struct maple_device *mdev, + char *recvbuf) +{ + char submask; + if (!started || (scanning == 2) || !fullscan) { + if ((mdev->unit == 0) && (checked[mdev->port] == false)) { + checked[mdev->port] = true; + maple_attach_driver(mdev); + } else { + if (mdev->unit != 0) + maple_attach_driver(mdev); + if (mdev->unit == 0) { + empty[mdev->port] = false; + maple_attach_driver(mdev); + } + } + } + if (mdev->unit == 0) { + submask = recvbuf[2] & 0x1F; + if (submask ^ subdevice_map[mdev->port]) { + maple_map_subunits(mdev, submask); + subdevice_map[mdev->port] = submask; + } + } +} + +static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf) +{ + if (mdev->fileerr_handler) { + mdev->fileerr_handler(mdev, recvbuf); + return; + } else + dev_warn(&mdev->dev, "device at (%d, %d) reports" + "file error 0x%X\n", mdev->port, mdev->unit, + ((int *)recvbuf)[1]); +} + +static void maple_port_rescan(void) +{ + int i; + struct maple_device *mdev; + + fullscan = 1; + for (i = 0; i < MAPLE_PORTS; i++) { + if (checked[i] == false) { + fullscan = 0; + mdev = baseunits[i]; + maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, + 0, NULL); + } + } +} + +/* maple dma end bottom half - implemented via workqueue */ +static void maple_dma_handler(struct work_struct *work) +{ + struct mapleq *mq, *nmq; + struct maple_device *mdev; + char *recvbuf; + enum maple_code code; + + if (!maple_dma_done()) + return; + __raw_writel(0, MAPLE_ENABLE); + if (!list_empty(&maple_sentq)) { + list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { + mdev = mq->dev; + recvbuf = mq->recvbuf->buf; + dma_cache_sync(&mdev->dev, recvbuf, 0x400, + DMA_FROM_DEVICE); + code = recvbuf[0]; + kfree(mq->sendbuf); + list_del_init(&mq->list); + 
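/* dispatch on the response code returned by the device */ +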
switch (code) { + case MAPLE_RESPONSE_NONE: + maple_response_none(mdev); + break; + + case MAPLE_RESPONSE_DEVINFO: + maple_response_devinfo(mdev, recvbuf); + atomic_set(&mdev->busy, 0); + break; + + case MAPLE_RESPONSE_DATATRF: + if (mdev->callback) + mdev->callback(mq); + atomic_set(&mdev->busy, 0); + wake_up(&mdev->maple_wait); + break; + + case MAPLE_RESPONSE_FILEERR: + maple_response_fileerr(mdev, recvbuf); + atomic_set(&mdev->busy, 0); + wake_up(&mdev->maple_wait); + break; + + case MAPLE_RESPONSE_AGAIN: + case MAPLE_RESPONSE_BADCMD: + case MAPLE_RESPONSE_BADFUNC: + dev_warn(&mdev->dev, "non-fatal error" + " 0x%X at (%d, %d)\n", code, + mdev->port, mdev->unit); + atomic_set(&mdev->busy, 0); + break; + + case MAPLE_RESPONSE_ALLINFO: + dev_notice(&mdev->dev, "extended" + " device information request for (%d, %d)" + " but call is not supported\n", mdev->port, + mdev->unit); + atomic_set(&mdev->busy, 0); + break; + + case MAPLE_RESPONSE_OK: + atomic_set(&mdev->busy, 0); + wake_up(&mdev->maple_wait); + break; + + default: + break; + } + } + /* if scanning is 1 then we have subdevices to check */ + if (scanning == 1) { + maple_send(); + scanning = 2; + } else + scanning = 0; + /*check if we have actually tested all ports yet */ + if (!fullscan) + maple_port_rescan(); + /* mark that we have been through the first scan */ + started = 1; + } + maple_send(); +} + +static irqreturn_t maple_dma_interrupt(int irq, void *dev_id) +{ + /* Load everything into the bottom half */ + schedule_work(&maple_dma_process); + return IRQ_HANDLED; +} + +static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id) +{ + schedule_work(&maple_vblank_process); + return IRQ_HANDLED; +} + +static int maple_set_dma_interrupt_handler(void) +{ + return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt, + IRQF_SHARED, "maple bus DMA", &maple_unsupported_device); +} + +static int maple_set_vblank_interrupt_handler(void) +{ + return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt, + IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device); +} + +static int maple_get_dma_buffer(void) +{ + maple_sendbuf = + (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, + MAPLE_DMA_PAGES); + if (!maple_sendbuf) + return -ENOMEM; + return 0; +} + +static int maple_match_bus_driver(struct device *devptr, + struct device_driver *drvptr) +{ + struct maple_driver *maple_drv = to_maple_driver(drvptr); + struct maple_device *maple_dev = to_maple_dev(devptr); + + /* Trap empty port case */ + if (maple_dev->devinfo.function == 0xFFFFFFFF) + return 0; + else if (maple_dev->devinfo.function & + cpu_to_be32(maple_drv->function)) + return 1; + return 0; +} + +static int maple_bus_uevent(struct device *dev, + struct kobj_uevent_env *env) +{ + return 0; +} + +static void maple_bus_release(struct device *dev) +{ +} + +static struct maple_driver maple_unsupported_device = { + .drv = { + .name = "maple_unsupported_device", + .bus = &maple_bus_type, + }, +}; +/* + * maple_bus_type - core maple bus structure + */ +struct bus_type maple_bus_type = { + .name = "maple", + .match = maple_match_bus_driver, + .uevent = maple_bus_uevent, +}; +EXPORT_SYMBOL_GPL(maple_bus_type); + +static struct device maple_bus = { + .init_name = "maple", + .release = maple_bus_release, +}; + +static int __init maple_bus_init(void) +{ + int retval, i; + struct maple_device *mdev[MAPLE_PORTS]; + + __raw_writel(0, MAPLE_ENABLE); + + retval = device_register(&maple_bus); + if (retval) + goto cleanup; + + retval = bus_register(&maple_bus_type); + if (retval) + goto 
cleanup_device; + + retval = driver_register(&maple_unsupported_device.drv); + if (retval) + goto cleanup_bus; + + /* allocate memory for maple bus dma */ + retval = maple_get_dma_buffer(); + if (retval) { + dev_err(&maple_bus, "failed to allocate DMA buffers\n"); + goto cleanup_basic; + } + + /* set up DMA interrupt handler */ + retval = maple_set_dma_interrupt_handler(); + if (retval) { + dev_err(&maple_bus, "bus failed to grab maple " + "DMA IRQ\n"); + goto cleanup_dma; + } + + /* set up VBLANK interrupt handler */ + retval = maple_set_vblank_interrupt_handler(); + if (retval) { + dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n"); + goto cleanup_irq; + } + + maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); + + if (!maple_queue_cache) + goto cleanup_bothirqs; + + INIT_LIST_HEAD(&maple_waitq); + INIT_LIST_HEAD(&maple_sentq); + + /* setup maple ports */ + for (i = 0; i < MAPLE_PORTS; i++) { + checked[i] = false; + empty[i] = false; + mdev[i] = maple_alloc_dev(i, 0); + if (!mdev[i]) { + while (i-- > 0) + maple_free_dev(mdev[i]); + goto cleanup_cache; + } + baseunits[i] = mdev[i]; + atomic_set(&mdev[i]->busy, 1); + maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); + subdevice_map[i] = 0; + } + + maple_pnp_time = jiffies + HZ; + /* prepare initial queue */ + maple_send(); + dev_info(&maple_bus, "bus core now registered\n"); + + return 0; + +cleanup_cache: + kmem_cache_destroy(maple_queue_cache); + +cleanup_bothirqs: + free_irq(HW_EVENT_VSYNC, 0); + +cleanup_irq: + free_irq(HW_EVENT_MAPLE_DMA, 0); + +cleanup_dma: + free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); + +cleanup_basic: + driver_unregister(&maple_unsupported_device.drv); + +cleanup_bus: + bus_unregister(&maple_bus_type); + +cleanup_device: + device_unregister(&maple_bus); + +cleanup: + printk(KERN_ERR "Maple bus registration failed\n"); + return retval; +} +/* Push init to later to ensure hardware gets detected */ +fs_initcall(maple_bus_init); diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c new file mode 100644 index 000000000..fe8875f0d --- /dev/null +++ b/drivers/sh/pm_runtime.c @@ -0,0 +1,101 @@ +/* + * Runtime PM support code + * + * Copyright (C) 2009-2010 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
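+ * + * Installs a default PM domain that uses pm_clk to stop and restart the + * module clock of platform devices across runtime suspend and resume.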
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/pm_clock.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/sh_clk.h> +#include <linux/bitmap.h> +#include <linux/slab.h> + +#ifdef CONFIG_PM +static int sh_pm_runtime_suspend(struct device *dev) +{ + int ret; + + ret = pm_generic_runtime_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend device\n"); + return ret; + } + + ret = pm_clk_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend clock\n"); + pm_generic_runtime_resume(dev); + return ret; + } + + return 0; +} + +static int sh_pm_runtime_resume(struct device *dev) +{ + int ret; + + ret = pm_clk_resume(dev); + if (ret) { + dev_err(dev, "failed to resume clock\n"); + return ret; + } + + return pm_generic_runtime_resume(dev); +} + +static struct dev_pm_domain default_pm_domain = { + .ops = { + .runtime_suspend = sh_pm_runtime_suspend, + .runtime_resume = sh_pm_runtime_resume, + USE_PLATFORM_PM_SLEEP_OPS + }, +}; + +#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) + +#else + +#define DEFAULT_PM_DOMAIN_PTR NULL + +#endif /* CONFIG_PM */ + +static struct pm_clk_notifier_block platform_bus_notifier = { + .pm_domain = DEFAULT_PM_DOMAIN_PTR, + .con_ids = { NULL, }, +}; + +static int __init sh_pm_runtime_init(void) +{ + if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { + if (!of_machine_is_compatible("renesas,emev2") && + !of_machine_is_compatible("renesas,r7s72100") && +#ifndef CONFIG_PM_GENERIC_DOMAINS_OF + !of_machine_is_compatible("renesas,r8a73a4") && + !of_machine_is_compatible("renesas,r8a7740") && + !of_machine_is_compatible("renesas,sh73a0") && +#endif + !of_machine_is_compatible("renesas,r8a7778") && + !of_machine_is_compatible("renesas,r8a7779") && + !of_machine_is_compatible("renesas,r8a7790") && + !of_machine_is_compatible("renesas,r8a7791") && + !of_machine_is_compatible("renesas,r8a7792") && + !of_machine_is_compatible("renesas,r8a7793") && + !of_machine_is_compatible("renesas,r8a7794")) + return 0; + } + + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + return 0; +} +core_initcall(sh_pm_runtime_init); diff --git a/drivers/sh/superhyway/Makefile b/drivers/sh/superhyway/Makefile new file mode 100644 index 000000000..499dc47d8 --- /dev/null +++ b/drivers/sh/superhyway/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the SuperHyway bus drivers. +# + +obj-$(CONFIG_SUPERHYWAY) += superhyway.o +obj-$(CONFIG_SYSFS) += superhyway-sysfs.o + diff --git a/drivers/sh/superhyway/superhyway-sysfs.c b/drivers/sh/superhyway/superhyway-sysfs.c new file mode 100644 index 000000000..554343308 --- /dev/null +++ b/drivers/sh/superhyway/superhyway-sysfs.c @@ -0,0 +1,45 @@ +/* + * drivers/sh/superhyway/superhyway-sysfs.c + * + * SuperHyway Bus sysfs interface + * + * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
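+ * + * Exposes each module's VCR fields (module ID and version, error flags, + * bottom/top memory block) and its resource base as read-only attributes.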
+ */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/superhyway.h> + +#define superhyway_ro_attr(name, fmt, field) \ +static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + struct superhyway_device *s = to_superhyway_device(dev); \ + return sprintf(buf, fmt, s->field); \ +} + +/* VCR flags */ +superhyway_ro_attr(perr_flags, "0x%02x\n", vcr.perr_flags); +superhyway_ro_attr(merr_flags, "0x%02x\n", vcr.merr_flags); +superhyway_ro_attr(mod_vers, "0x%04x\n", vcr.mod_vers); +superhyway_ro_attr(mod_id, "0x%04x\n", vcr.mod_id); +superhyway_ro_attr(bot_mb, "0x%02x\n", vcr.bot_mb); +superhyway_ro_attr(top_mb, "0x%02x\n", vcr.top_mb); + +/* Misc */ +superhyway_ro_attr(resource, "0x%08lx\n", resource[0].start); + +struct device_attribute superhyway_dev_attrs[] = { + __ATTR_RO(perr_flags), + __ATTR_RO(merr_flags), + __ATTR_RO(mod_vers), + __ATTR_RO(mod_id), + __ATTR_RO(bot_mb), + __ATTR_RO(top_mb), + __ATTR_RO(resource), + __ATTR_NULL, +}; + diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c new file mode 100644 index 000000000..2d9e7f3d5 --- /dev/null +++ b/drivers/sh/superhyway/superhyway.c @@ -0,0 +1,238 @@ +/* + * drivers/sh/superhyway/superhyway.c + * + * SuperHyway Bus Driver + * + * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/superhyway.h> +#include <linux/string.h> +#include <linux/slab.h> + +static int superhyway_devices; + +static struct device superhyway_bus_device = { + .init_name = "superhyway", +}; + +static void superhyway_device_release(struct device *dev) +{ + struct superhyway_device *sdev = to_superhyway_device(dev); + + kfree(sdev->resource); + kfree(sdev); +} + +/** + * superhyway_add_device - Add a SuperHyway module + * @base: Physical address where module is mapped. + * @sdev: SuperHyway device to add, or NULL to allocate a new one. + * @bus: Bus where SuperHyway module resides. + * + * This is responsible for adding a new SuperHyway module. This sets up a new + * struct superhyway_device for the module being added if @sdev == NULL. + * + * Devices are initially added in the order that they are scanned (from the + * top-down of the memory map), and are assigned an ID based on the order that + * they are added. Any manual addition of a module will thus get the ID after + * the devices already discovered regardless of where it resides in memory. + * + * Further work can and should be done in superhyway_scan_bus(), to be sure + * that any new modules are properly discovered and subsequently registered. 
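+ * + * Returns the result of registering the new device with the driver core, + * or -ENOMEM if the device or its resource could not be allocated.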
+ */ +int superhyway_add_device(unsigned long base, struct superhyway_device *sdev, + struct superhyway_bus *bus) +{ + struct superhyway_device *dev = sdev; + + if (!dev) { + dev = kzalloc(sizeof(struct superhyway_device), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + } + + dev->bus = bus; + superhyway_read_vcr(dev, base, &dev->vcr); + + if (!dev->resource) { + dev->resource = kmalloc(sizeof(struct resource), GFP_KERNEL); + if (!dev->resource) { + kfree(dev); + return -ENOMEM; + } + + dev->resource->name = dev->name; + dev->resource->start = base; + dev->resource->end = dev->resource->start + 0x01000000; + } + + dev->dev.parent = &superhyway_bus_device; + dev->dev.bus = &superhyway_bus_type; + dev->dev.release = superhyway_device_release; + dev->id.id = dev->vcr.mod_id; + + sprintf(dev->name, "SuperHyway device %04x", dev->id.id); + dev_set_name(&dev->dev, "%02x", superhyway_devices); + + superhyway_devices++; + + return device_register(&dev->dev); +} + +int superhyway_add_devices(struct superhyway_bus *bus, + struct superhyway_device **devices, + int nr_devices) +{ + int i, ret = 0; + + for (i = 0; i < nr_devices; i++) { + struct superhyway_device *dev = devices[i]; + ret |= superhyway_add_device(dev->resource[0].start, dev, bus); + } + + return ret; +} + +static int __init superhyway_init(void) +{ + struct superhyway_bus *bus; + int ret; + + ret = device_register(&superhyway_bus_device); + if (unlikely(ret)) + return ret; + + for (bus = superhyway_channels; bus->ops; bus++) + ret |= superhyway_scan_bus(bus); + + return ret; +} +postcore_initcall(superhyway_init); + +static const struct superhyway_device_id * +superhyway_match_id(const struct superhyway_device_id *ids, + struct superhyway_device *dev) +{ + while (ids->id) { + if (ids->id == dev->id.id) + return ids; + + ids++; + } + + return NULL; +} + +static int superhyway_device_probe(struct device *dev) +{ + struct superhyway_device *shyway_dev = to_superhyway_device(dev); + struct superhyway_driver *shyway_drv = to_superhyway_driver(dev->driver); + + if (shyway_drv && shyway_drv->probe) { + const struct superhyway_device_id *id; + + id = superhyway_match_id(shyway_drv->id_table, shyway_dev); + if (id) + return shyway_drv->probe(shyway_dev, id); + } + + return -ENODEV; +} + +static int superhyway_device_remove(struct device *dev) +{ + struct superhyway_device *shyway_dev = to_superhyway_device(dev); + struct superhyway_driver *shyway_drv = to_superhyway_driver(dev->driver); + + if (shyway_drv && shyway_drv->remove) { + shyway_drv->remove(shyway_dev); + return 0; + } + + return -ENODEV; +} + +/** + * superhyway_register_driver - Register a new SuperHyway driver + * @drv: SuperHyway driver to register. + * + * This registers the passed in @drv. Any devices matching the id table will + * automatically be populated and handed off to the driver's specified probe + * routine. + */ +int superhyway_register_driver(struct superhyway_driver *drv) +{ + drv->drv.name = drv->name; + drv->drv.bus = &superhyway_bus_type; + + return driver_register(&drv->drv); +} + +/** + * superhyway_unregister_driver - Unregister a SuperHyway driver + * @drv: SuperHyway driver to unregister. + * + * This cleans up after superhyway_register_driver(), and should be invoked in + * the exit path of any module drivers. 
+ */ +void superhyway_unregister_driver(struct superhyway_driver *drv) +{ + driver_unregister(&drv->drv); +} + +static int superhyway_bus_match(struct device *dev, struct device_driver *drv) +{ + struct superhyway_device *shyway_dev = to_superhyway_device(dev); + struct superhyway_driver *shyway_drv = to_superhyway_driver(drv); + const struct superhyway_device_id *ids = shyway_drv->id_table; + + if (!ids) + return -EINVAL; + if (superhyway_match_id(ids, shyway_dev)) + return 1; + + return -ENODEV; +} + +struct bus_type superhyway_bus_type = { + .name = "superhyway", + .match = superhyway_bus_match, +#ifdef CONFIG_SYSFS + .dev_attrs = superhyway_dev_attrs, +#endif + .probe = superhyway_device_probe, + .remove = superhyway_device_remove, +}; + +static int __init superhyway_bus_init(void) +{ + return bus_register(&superhyway_bus_type); +} + +static void __exit superhyway_bus_exit(void) +{ + device_unregister(&superhyway_bus_device); + bus_unregister(&superhyway_bus_type); +} + +core_initcall(superhyway_bus_init); +module_exit(superhyway_bus_exit); + +EXPORT_SYMBOL(superhyway_bus_type); +EXPORT_SYMBOL(superhyway_add_device); +EXPORT_SYMBOL(superhyway_add_devices); +EXPORT_SYMBOL(superhyway_register_driver); +EXPORT_SYMBOL(superhyway_unregister_driver); + +MODULE_LICENSE("GPL"); |