Diffstat (limited to 'arch/sparc')
24 files changed, 146 insertions, 144 deletions
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0e69b7e7a..7dcbebbca 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -17,10 +17,12 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
-
 #define ATOMIC_INIT(i)	{ (i) }
 
 int atomic_add_return(int, atomic_t *);
+void atomic_and(int, atomic_t *);
+void atomic_or(int, atomic_t *);
+void atomic_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 408274991..917084ace 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *);
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 809941e33..14a928601 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,12 +60,12 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 7e064c68c..a21da597b 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -7,11 +7,9 @@
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 
+#define HAVE_ARCH_DMA_SUPPORTED 1
 int dma_supported(struct device *dev, u64 mask);
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
@@ -39,39 +37,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
-#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *cpu_addr;
-
-	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-	return cpu_addr;
-}
-
-#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-	return (dma_addr == DMA_ERROR_CODE);
-}
+#define HAVE_ARCH_DMA_SET_MASK 1
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
@@ -86,4 +52,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return -EINVAL;
 }
 
+#include <asm-generic/dma-mapping-common.h>
+
 #endif
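The three bitwise atomics declared above take the same arguments as atomic_add() and return nothing; on sparc32 they are backed by the hashed-spinlock versions added to lib/atomic32.c further down, on sparc64 by the new assembler ops. A minimal usage sketch (the mydev_* names and flag values are invented for illustration and are not part of this commit):

#include <linux/atomic.h>

/* Hypothetical flag bits, for illustration only. */
#define MYDEV_FLAG_RUNNING	0x01
#define MYDEV_FLAG_ERROR	0x02

static atomic_t mydev_flags = ATOMIC_INIT(0);

static void mydev_mark_running(void)
{
	/* Atomically set a bit; no return value, no implied barrier. */
	atomic_or(MYDEV_FLAG_RUNNING, &mydev_flags);
}

static void mydev_clear_error(void)
{
	/* Atomically clear a bit by AND-ing with its complement. */
	atomic_and(~MYDEV_FLAG_ERROR, &mydev_flags);
}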
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index 9ec94ad11..3192a8e42 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -2,7 +2,7 @@
 #define _ASM_SPARC64_FTRACE
 
 #ifdef CONFIG_MCOUNT
-#define MCOUNT_ADDR		((long)(_mcount))
+#define MCOUNT_ADDR		((unsigned long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index cc9b04a2b..62d0354d1 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,16 +7,33 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("1:\n\t"
-		 "nop\n\t"
-		 "nop\n\t"
-		 ".pushsection __jump_table, \"aw\"\n\t"
-		 ".align 4\n\t"
-		 ".word 1b, %l[l_yes], %c0\n\t"
-		 ".popsection \n\t"
-		 : : "i" (key) : : l_yes);
+	asm_volatile_goto("1:\n\t"
+			  "nop\n\t"
+			  "nop\n\t"
+			  ".pushsection __jump_table, \"aw\"\n\t"
+			  ".align 4\n\t"
+			  ".word 1b, %l[l_yes], %c0\n\t"
+			  ".popsection \n\t"
+			  : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+			  "b %l[l_yes]\n\t"
+			  "nop\n\t"
+			  ".pushsection __jump_table, \"aw\"\n\t"
+			  ".align 4\n\t"
+			  ".word 1b, %l[l_yes], %c0\n\t"
+			  ".popsection \n\t"
+			  : : "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
l_yes:
 	return true;
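The extra "branch" argument and the new arch_static_branch_jump() hook back the reworked static-key branch primitives from the same series. A consumer-side sketch, assuming the generic <linux/jump_label.h> interface (the key and functions below are hypothetical, not part of this commit):

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key; defaults to false, so the fast path is straight-line code. */
static DEFINE_STATIC_KEY_FALSE(mydev_tracing);

static void mydev_fast_path(void)
{
	/*
	 * While the key is false this compiles to the two-nop sequence from
	 * arch_static_branch(); static_branch_enable() later patches the site
	 * into a taken branch via arch_jump_label_transform(..., JUMP_LABEL_JMP).
	 */
	if (static_branch_unlikely(&mydev_tracing))
		pr_debug("mydev: slow path taken\n");
}

static void mydev_enable_tracing(void)
{
	static_branch_enable(&mydev_tracing);
}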
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index f06b36a00..91b963a88 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -14,7 +14,7 @@
 #include <asm-generic/4level-fixup.h>
 
 #include <linux/spinlock.h>
-#include <linux/swap.h>
+#include <linux/mm_types.h>
 #include <asm/types.h>
 #include <asm/pgtsrmmu.h>
 #include <asm/vaddrs.h>
diff --git a/arch/sparc/include/uapi/asm/pstate.h b/arch/sparc/include/uapi/asm/pstate.h
index 4b6b998af..cf832e14a 100644
--- a/arch/sparc/include/uapi/asm/pstate.h
+++ b/arch/sparc/include/uapi/asm/pstate.h
@@ -88,7 +88,7 @@
 #define VERS_MAXTL	_AC(0x000000000000ff00,UL) /* Max Trap Level.	*/
 #define VERS_MAXWIN	_AC(0x000000000000001f,UL) /* Max RegWindow Idx.*/
 
-/* Compatability Feature Register (%asr26), SPARC-T4 and later */
+/* Compatibility Feature Register (%asr26), SPARC-T4 and later */
 #define CFR_AES		_AC(0x0000000000000001,UL) /* Supports AES opcodes */
 #define CFR_DES		_AC(0x0000000000000002,UL) /* Supports DES opcodes */
 #define CFR_KASUMI	_AC(0x0000000000000004,UL) /* Supports KASUMI opcodes */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4033c23bd..e22416ce5 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -210,21 +210,21 @@ struct irq_handler_data {
 
 static inline unsigned int irq_data_to_handle(struct irq_data *data)
 {
-	struct irq_handler_data *ihd = data->handler_data;
+	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
 
 	return ihd->dev_handle;
 }
 
 static inline unsigned int irq_data_to_ino(struct irq_data *data)
 {
-	struct irq_handler_data *ihd = data->handler_data;
+	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
 
 	return ihd->dev_ino;
 }
 
 static inline unsigned long irq_data_to_sysino(struct irq_data *data)
 {
-	struct irq_handler_data *ihd = data->handler_data;
+	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
 
 	return ihd->sysino;
 }
@@ -370,13 +370,15 @@ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
 
 static void sun4u_irq_enable(struct irq_data *data)
 {
-	struct irq_handler_data *handler_data = data->handler_data;
+	struct irq_handler_data *handler_data;
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (likely(handler_data)) {
 		unsigned long cpuid, imap, val;
 		unsigned int tid;
 
-		cpuid = irq_choose_cpu(data->irq, data->affinity);
+		cpuid = irq_choose_cpu(data->irq,
+				       irq_data_get_affinity_mask(data));
 		imap = handler_data->imap;
 
 		tid = sun4u_compute_tid(imap, cpuid);
@@ -393,8 +395,9 @@ static void sun4u_irq_enable(struct irq_data *data)
 static int sun4u_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask, bool force)
 {
-	struct irq_handler_data *handler_data = data->handler_data;
+	struct irq_handler_data *handler_data;
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (likely(handler_data)) {
 		unsigned long cpuid, imap, val;
 		unsigned int tid;
@@ -438,15 +441,17 @@ static void sun4u_irq_disable(struct irq_data *data)
 
 static void sun4u_irq_eoi(struct irq_data *data)
 {
-	struct irq_handler_data *handler_data = data->handler_data;
+	struct irq_handler_data *handler_data;
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (likely(handler_data))
 		upa_writeq(ICLR_IDLE, handler_data->iclr);
 }
 
 static void sun4v_irq_enable(struct irq_data *data)
 {
-	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
+	unsigned long cpuid = irq_choose_cpu(data->irq,
+					     irq_data_get_affinity_mask(data));
 	unsigned int ino = irq_data_to_sysino(data);
 	int err;
 
@@ -508,7 +513,7 @@ static void sun4v_virq_enable(struct irq_data *data)
 	unsigned long cpuid;
 	int err;
 
-	cpuid = irq_choose_cpu(data->irq, data->affinity);
+	cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));
 
 	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
 	if (err != HV_EOK)
@@ -881,8 +886,8 @@ void fixup_irqs(void)
 		if (desc->action && !irqd_is_per_cpu(data)) {
 			if (data->chip->irq_set_affinity)
 				data->chip->irq_set_affinity(data,
-							     data->affinity,
-							     false);
+					irq_data_get_affinity_mask(data),
+					false);
 		}
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
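The irq_64.c conversion above, and the leon/sun4d/sun4m changes below, all follow one pattern: go through the irq_data accessor helpers instead of dereferencing data->handler_data and data->affinity directly. A generic sketch of that pattern (the mydev_* structure, register layout, and callbacks are invented for illustration):

#include <linux/io.h>
#include <linux/irq.h>

/* Illustrative per-interrupt state; not from the sparc code. */
struct mydev_irq_info {
	void __iomem	*mask_reg;
	u32		mask_bit;
};

static void mydev_mask_irq(struct irq_data *data)
{
	/* Accessor instead of reading data->handler_data directly. */
	struct mydev_irq_info *info = irq_data_get_irq_handler_data(data);

	writel(readl(info->mask_reg) | info->mask_bit, info->mask_reg);
}

static int mydev_set_affinity(struct irq_data *data,
			      const struct cpumask *dest, bool force)
{
	/* Accessor instead of reading data->affinity directly. */
	unsigned int oldcpu = cpumask_first(irq_data_get_affinity_mask(data));
	unsigned int newcpu = cpumask_first(dest);

	return (oldcpu == newcpu) ? IRQ_SET_MASK_OK_NOCOPY : IRQ_SET_MASK_OK;
}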
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c11e..59bbeff55 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -16,7 +16,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	u32 val;
 	u32 *insn = (u32 *) (unsigned long) entry->code;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		s32 off = (s32)entry->target - (s32)entry->code;
 
 #ifdef CONFIG_SPARC64
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 9bbb8f2bb..42efcf85f 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -53,7 +53,7 @@ static inline unsigned int leon_eirq_get(int cpu)
 }
 
 /* Handle one or multiple IRQs from the extended interrupt controller */
-static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
+static void leon_handle_ext_irq(struct irq_desc *desc)
 {
 	unsigned int eirq;
 	struct irq_bucket *p;
@@ -126,7 +126,7 @@ static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
 	int oldcpu, newcpu;
 
 	mask = (unsigned long)data->chip_data;
-	oldcpu = irq_choose_cpu(data->affinity);
+	oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
 	newcpu = irq_choose_cpu(dest);
 
 	if (oldcpu == newcpu)
@@ -149,7 +149,7 @@ static void leon_unmask_irq(struct irq_data *data)
 	int cpu;
 
 	mask = (unsigned long)data->chip_data;
-	cpu = irq_choose_cpu(data->affinity);
+	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
 	spin_lock_irqsave(&leon_irq_lock, flags);
 	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
 	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
@@ -162,7 +162,7 @@ static void leon_mask_irq(struct irq_data *data)
 	int cpu;
 
 	mask = (unsigned long)data->chip_data;
-	cpu = irq_choose_cpu(data->affinity);
+	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
 	spin_lock_irqsave(&leon_irq_lock, flags);
 	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
 	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 3382f7b3e..1e77128a8 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -357,7 +357,7 @@ static struct irq_chip grpci1_irq = {
 };
 
 /* Handle one or multiple IRQs from the PCI core */
-static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci1_pci_flow_irq(struct irq_desc *desc)
 {
 	struct grpci1_priv *priv = grpci1priv;
 	int i, ack = 0;
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 814fb1729..f727c4de1 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -498,7 +498,7 @@ static struct irq_chip grpci2_irq = {
 };
 
 /* Handle one or multiple IRQs from the PCI core */
-static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+static void grpci2_pci_flow_irq(struct irq_desc *desc)
 {
 	struct grpci2_priv *priv = grpci2priv;
 	int i, ack = 0;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index c928bc64b..b91d7f146 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -231,8 +231,7 @@ static void pci_parse_of_addrs(struct platform_device *op,
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
-			      | IORESOURCE_SIZEALIGN;
+			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
@@ -249,7 +248,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 					 struct pci_bus *bus, int devfn)
 {
 	struct dev_archdata *sd;
-	struct pci_slot *slot;
 	struct platform_device *op;
 	struct pci_dev *dev;
 	const char *type;
@@ -290,10 +288,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 	dev->multifunction = 0;		/* maybe a lie? */
 	set_pcie_port_type(dev);
 
-	list_for_each_entry(slot, &dev->bus->slots, list)
-		if (PCI_SLOT(dev->devfn) == slot->number)
-			dev->slot = slot;
-
+	pci_dev_assign_slot(dev);
 	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
 	dev->device = of_getintprop_default(node, "device-id", 0xffff);
 	dev->subsystem_vendor =
@@ -918,7 +913,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 void arch_teardown_msi_irq(unsigned int irq)
 {
 	struct msi_desc *entry = irq_get_msi_desc(irq);
-	struct pci_dev *pdev = entry->dev;
+	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
 	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 
 	if (pbm->teardown_msi_irq)
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 50e7b626a..c5113c7ce 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -333,11 +333,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
 
 	/*
-	 * A new process must start with interrupts closed in 2.5,
-	 * because this is how Mingo's scheduler works (see schedule_tail
-	 * and finish_arch_switch). If we do not do it, a timer interrupt hits
-	 * before we unlock, attempts to re-take the rq->lock, and then we die.
-	 * Thus, kpsr|=PSR_PIL.
+	 * A new process must start with interrupts disabled, see schedule_tail()
+	 * and finish_task_switch(). (If we do not do it and if a timer interrupt
+	 * hits before we unlock and attempts to take the rq->lock, we deadlock.)
+	 *
+	 * Thus, kpsr |= PSR_PIL.
 	 */
 	ti->ksp = (unsigned long) new_stack;
 	p->thread.kregs = childregs;
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index a1bb2675b..a87d0e47c 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -188,7 +188,7 @@ void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs)
 
 static void sun4d_mask_irq(struct irq_data *data)
 {
-	struct sun4d_handler_data *handler_data = data->handler_data;
+	struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
 	unsigned int real_irq;
 #ifdef CONFIG_SMP
 	int cpuid = handler_data->cpuid;
@@ -206,7 +206,7 @@ static void sun4d_mask_irq(struct irq_data *data)
 
 static void sun4d_unmask_irq(struct irq_data *data)
 {
-	struct sun4d_handler_data *handler_data = data->handler_data;
+	struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
 	unsigned int real_irq;
 #ifdef CONFIG_SMP
 	int cpuid = handler_data->cpuid;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 8bb3b3fdd..da737c712 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -188,9 +188,10 @@ static unsigned long sun4m_imask[0x50] = {
 
 static void sun4m_mask_irq(struct irq_data *data)
 {
-	struct sun4m_handler_data *handler_data = data->handler_data;
+	struct sun4m_handler_data *handler_data;
 	int cpu = smp_processor_id();
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (handler_data->mask) {
 		unsigned long flags;
 
@@ -206,9 +207,10 @@ static void sun4m_mask_irq(struct irq_data *data)
 
 static void sun4m_unmask_irq(struct irq_data *data)
 {
-	struct sun4m_handler_data *handler_data = data->handler_data;
+	struct sun4m_handler_data *handler_data;
 	int cpu = smp_processor_id();
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (handler_data->mask) {
 		unsigned long flags;
 
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index d3408e72d..278c40abc 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -247,7 +247,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
 
 	ce = &per_cpu(sparc32_clockevent, cpu);
 
-	if (ce->mode & CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(ce))
 		sun4m_clear_profile_irq(cpu);
 	else
 		sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index c9692f387..1affabc96 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -101,21 +101,18 @@ irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void timer_ce_set_mode(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int timer_ce_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_RESUME:
-		timer_ce_enabled = 1;
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		timer_ce_enabled = 0;
-		break;
-	default:
-		break;
-	}
+	timer_ce_enabled = 0;
+	smp_mb();
+	return 0;
+}
+
+static int timer_ce_set_periodic(struct clock_event_device *evt)
+{
+	timer_ce_enabled = 1;
 	smp_mb();
+	return 0;
 }
 
 static __init void setup_timer_ce(void)
@@ -127,7 +124,9 @@ static __init void setup_timer_ce(void)
 	ce->name = "timer_ce";
 	ce->rating = 100;
 	ce->features = CLOCK_EVT_FEAT_PERIODIC;
-	ce->set_mode = timer_ce_set_mode;
+	ce->set_state_shutdown = timer_ce_shutdown;
+	ce->set_state_periodic = timer_ce_set_periodic;
+	ce->tick_resume = timer_ce_set_periodic;
 	ce->cpumask = cpu_possible_mask;
 	ce->shift = 32;
 	ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
@@ -183,24 +182,20 @@ static __init int setup_timer_cs(void)
 }
 
 #ifdef CONFIG_SMP
-static void percpu_ce_setup(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int percpu_ce_shutdown(struct clock_event_device *evt)
 {
 	int cpu = cpumask_first(evt->cpumask);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		sparc_config.load_profile_irq(cpu,
-					      SBUS_CLOCK_RATE / HZ);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		sparc_config.load_profile_irq(cpu, 0);
-		break;
-	default:
-		break;
-	}
+	sparc_config.load_profile_irq(cpu, 0);
+	return 0;
+}
+
+static int percpu_ce_set_periodic(struct clock_event_device *evt)
+{
+	int cpu = cpumask_first(evt->cpumask);
+
+	sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
+	return 0;
 }
 
 static int percpu_ce_set_next_event(unsigned long delta,
@@ -224,7 +219,9 @@ void register_percpu_ce(int cpu)
 	ce->name           = "percpu_ce";
 	ce->rating         = 200;
 	ce->features       = features;
-	ce->set_mode       = percpu_ce_setup;
+	ce->set_state_shutdown = percpu_ce_shutdown;
+	ce->set_state_periodic = percpu_ce_set_periodic;
+	ce->set_state_oneshot = percpu_ce_shutdown;
 	ce->set_next_event = percpu_ce_set_next_event;
 	ce->cpumask        = cpumask_of(cpu);
 	ce->shift          = 32;
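The clockevents conversions in time_32.c above and time_64.c below replace the single set_mode() multiplexer with one callback per target state. A sketch of the resulting shape, using placeholder mydev_* names (only the set_state_*/tick_resume members come from the generic <linux/clockchips.h> interface; the rest is invented for illustration):

#include <linux/clockchips.h>

static bool mydev_timer_running;	/* stands in for real timer hardware */

static int mydev_timer_shutdown(struct clock_event_device *evt)
{
	mydev_timer_running = false;
	return 0;
}

static int mydev_timer_set_periodic(struct clock_event_device *evt)
{
	mydev_timer_running = true;
	return 0;
}

static struct clock_event_device mydev_clockevent = {
	.name			= "mydev_timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 100,
	/* One callback per target state replaces the old set_mode() switch. */
	.set_state_shutdown	= mydev_timer_shutdown,
	.set_state_periodic	= mydev_timer_set_periodic,
	.tick_resume		= mydev_timer_set_periodic,
};

A real driver would then register the device, for example with clockevents_config_and_register().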
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2e6035c0a..c69b21e51 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -674,32 +674,19 @@ static int sparc64_next_event(unsigned long delta,
 	return tick_ops->add_compare(delta) ? -ETIME : 0;
 }
 
-static void sparc64_timer_setup(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		tick_ops->disable_irq();
-		break;
-
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_UNUSED:
-		WARN_ON(1);
-		break;
-	}
+static int sparc64_timer_shutdown(struct clock_event_device *evt)
+{
+	tick_ops->disable_irq();
+	return 0;
 }
 
 static struct clock_event_device sparc64_clockevent = {
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= sparc64_timer_setup,
-	.set_next_event	= sparc64_next_event,
-	.rating		= 100,
-	.shift		= 30,
-	.irq		= -1,
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= sparc64_timer_shutdown,
+	.set_next_event		= sparc64_next_event,
+	.rating			= 100,
+	.shift			= 30,
+	.irq			= -1,
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 71cd65ab2..b9d63c0a7 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-#define ATOMIC_OP(op, cop)						\
+#define ATOMIC_OP_RETURN(op, c_op)					\
 int atomic_##op##_return(int i, atomic_t *v)				\
 {									\
 	int ret;							\
 	unsigned long flags;						\
 	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 									\
-	ret = (v->counter cop i);					\
+	ret = (v->counter c_op i);					\
 									\
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 	return ret;							\
 }									\
 EXPORT_SYMBOL(atomic_##op##_return);
 
-ATOMIC_OP(add, +=)
+#define ATOMIC_OP(op, c_op)						\
+void atomic_##op(int i, atomic_t *v)					\
+{									\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	v->counter c_op i;						\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+}									\
+EXPORT_SYMBOL(atomic_##op);
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
+#undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
 int atomic_xchg(atomic_t *v, int new)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 05dac4390..d6b0363f3 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return);
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return);
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8069ce12f..8eb454cfe 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 7931eeeb6..f8b9f71b9 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -807,7 +807,7 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(flen, proglen, pass, image);
+		bpf_jit_dump(flen, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(image, image + proglen);