From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import

---
 arch/sparc/include/asm/barrier_64.h | 77 +++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 arch/sparc/include/asm/barrier_64.h

(limited to 'arch/sparc/include/asm/barrier_64.h')

diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
new file mode 100644
index 000000000..76648941f
--- /dev/null
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -0,0 +1,77 @@
+#ifndef __SPARC64_BARRIER_H
+#define __SPARC64_BARRIER_H
+
+/* These are here in an effort to more fully work around Spitfire Errata
+ * #51. Essentially, if a memory barrier occurs soon after a mispredicted
+ * branch, the chip can stop executing instructions until a trap occurs.
+ * Therefore, if interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be right in the
+ * delay slot, but a case has been traced recently wherein the memory barrier
+ * was one instruction after the branch delay slot and the chip still hung.
+ * The offending sequence was the following in sym_wakeup_done() of the
+ * sym53c8xx_2 driver:
+ *
+ *	call	sym_ccb_from_dsa, 0
+ *	 movge	%icc, 0, %l0
+ *	brz,pn	%o0, .LL1303
+ *	 mov	%o0, %l2
+ *	membar	#LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur. Therefore, we put
+ * the memory barrier explicitly into a "branch always, predicted taken"
+ * delay slot to avoid the problem case.
+ */
+#define membar_safe(type) \
+do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
+			     " membar	" type "\n" \
+			     "1:\n" \
+			     : : : "memory"); \
+} while (0)
+
+/* The kernel always executes in TSO memory model these days,
+ * and furthermore most sparc64 chips implement more stringent
+ * memory ordering than required by the specifications.
+ */
+#define mb()	membar_safe("#StoreLoad")
+#define rmb()	__asm__ __volatile__("":::"memory")
+#define wmb()	__asm__ __volatile__("":::"memory")
+
+#define dma_rmb()	rmb()
+#define dma_wmb()	wmb()
+
+#define set_mb(__var, __value) \
+	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#else
+#define smp_mb()	__asm__ __volatile__("":::"memory")
+#define smp_rmb()	__asm__ __volatile__("":::"memory")
+#define smp_wmb()	__asm__ __volatile__("":::"memory")
+#endif
+
+#define read_barrier_depends()		do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
+#endif /* !(__SPARC64_BARRIER_H) */
--
cgit v1.2.3-54-g00ecf
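
Aside (not part of the patch above): smp_store_release() and smp_load_acquire() in this header implement the usual release/acquire publication pattern, which on sparc64's TSO memory model needs only a compiler barrier around a once-only access. Below is a minimal userspace sketch of the same handoff, using the GCC/Clang __atomic builtins and pthreads as stand-ins for the kernel macros; the data, ready, producer and consumer names are invented for the illustration.

/* Illustrative only: userspace analogue of the release/acquire pairing
 * provided by smp_store_release()/smp_load_acquire() in the kernel.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>

static int data;
static int ready;

static void *producer(void *arg)
{
	(void)arg;
	data = 42;					/* plain store, published below */
	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);	/* analogue of smp_store_release(&ready, 1) */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))	/* analogue of smp_load_acquire(&ready) */
		;					/* spin until the flag is published */
	printf("data = %d\n", data);			/* acquire pairs with release: sees 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

A consumer that observes ready == 1 through the acquire load is guaranteed to also observe data == 42 stored before the release; that ordering contract is what the kernel macros provide, and why on TSO hardware they can be as cheap as a compiler barrier.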