Diffstat (limited to 'arch/tile/include/asm')
-rw-r--r--  arch/tile/include/asm/Kbuild            |  1
-rw-r--r--  arch/tile/include/asm/atomic_32.h       | 28
-rw-r--r--  arch/tile/include/asm/atomic_64.h       | 40
-rw-r--r--  arch/tile/include/asm/dma-mapping.h     | 45
-rw-r--r--  arch/tile/include/asm/elf.h             |  4
-rw-r--r--  arch/tile/include/asm/io.h              |  1
-rw-r--r--  arch/tile/include/asm/switch_to.h       |  8
-rw-r--r--  arch/tile/include/asm/syscall.h         | 28
-rw-r--r--  arch/tile/include/asm/word-at-a-time.h  |  8
9 files changed, 110 insertions, 53 deletions
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d8a843163..ba35c41c7 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += poll.h
 generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += seccomp.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fad9..d320ce253 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op) \
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	_atomic_##op((unsigned long *)&v->counter, i); \
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op) \
+long long _atomic64_##op(long long *v, long long n); \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+	_atomic64_##op(&v->counter, i); \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
 				     long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					    int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
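The ATOMIC_OP() pattern above stamps out one out-of-line helper declaration plus one trivial inline wrapper per bitwise operation; the helpers themselves live in the arch's atomic support code, outside this diff. Written out by hand, ATOMIC_OP(and) expands to roughly:

    unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask);
    static inline void atomic_and(int i, atomic_t *v)
    {
            _atomic_and((unsigned long *)&v->counter, i);
    }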
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970ce..096a56d6e 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
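TILE-Gx provides fetch-and and fetch-or instructions but no fetch-xor, which is why atomic_xor() and atomic64_xor() above fall back to a compare-exchange retry loop. A minimal user-space sketch of the same pattern in portable C11 atomics rather than the tile-specific __insn_* intrinsics (an illustration, not kernel code):

    #include <stdatomic.h>

    /* Atomically xor 'i' into '*p': retry until no other thread has
     * changed the value between our load and our exchange. */
    static void atomic_xor_sketch(_Atomic long *p, long i)
    {
            long guess = atomic_load_explicit(p, memory_order_relaxed);
            while (!atomic_compare_exchange_weak(p, &guess, guess ^ i))
                    ;       /* on failure, 'guess' is refreshed automatically */
    }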
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 1eae359d8..96ac6cce4 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -59,8 +59,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
 	dev->archdata.dma_ops = ops;
@@ -74,18 +72,9 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }
 
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-}
+#define HAVE_ARCH_DMA_SET_MASK 1
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->dma_supported(dev, mask);
-}
+#include <asm-generic/dma-mapping-common.h>
 
 static inline int
 dma_set_mask(struct device *dev, u64 mask)
@@ -116,36 +105,6 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	void *cpu_addr;
-
-	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
-
-	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-
-	return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-
-	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
-#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
-
 /*
  * dma_alloc_noncoherent() is #defined to return coherent memory,
  * so there's no need to do any flushing here.
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 41d9878a9..c505d77e4 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -22,6 +22,7 @@
 
 #include <arch/chip.h>
 
 #include <linux/ptrace.h>
+#include <linux/elf-em.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -30,9 +31,6 @@ typedef unsigned long elf_greg_t;
 #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-#define EM_TILEPRO 188
-#define EM_TILEGX 191
-
 /* Provide a nominal data structure. */
 #define ELF_NFPREG 0
 typedef double elf_fpreg_t;
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index dc61de15c..322b5fe94 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -55,6 +55,7 @@ extern void iounmap(volatile void __iomem *addr);
 #define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_wt(physaddr, size)		ioremap(physaddr, size)
+#define ioremap_uc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)
 
 #define mmiowb()
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
index b8f888cbe..34ee72705 100644
--- a/arch/tile/include/asm/switch_to.h
+++ b/arch/tile/include/asm/switch_to.h
@@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void);
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
  * threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
  */
-#define finish_arch_switch(prev) do {                                     \
-	if (unlikely((prev)->state == TASK_DEAD))                         \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
+#define finish_arch_post_lock_switch() do {                               \
 	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
 		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
 	if (current->mm == NULL && !kstack_hash &&                        \
-	    current_thread_info()->homecache_cpu != smp_processor_id())   \
+	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread();                              \
 } while (0)
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
index 9644b88f1..373d73064 100644
--- a/arch/tile/include/asm/syscall.h
+++ b/arch/tile/include/asm/syscall.h
@@ -20,6 +20,8 @@
 
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/audit.h>
+#include <linux/compat.h>
 #include <arch/abi.h>
 
 /* The array of function pointers for syscalls. */
@@ -61,7 +63,15 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->regs[0] = (long) error ?: val;
+	if (error) {
+		/* R0 is the passed-in negative error, R1 is positive. */
+		regs->regs[0] = error;
+		regs->regs[1] = -error;
+	} else {
+		/* R1 set to zero to indicate no error. */
+		regs->regs[0] = val;
+		regs->regs[1] = 0;
+	}
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
@@ -82,4 +92,20 @@ static inline void syscall_set_arguments(struct task_struct *task,
 	memcpy(&regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * tile has the same system calls both on little- and big- endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_TILEGX32;
+
+#ifdef CONFIG_TILEGX
+	return AUDIT_ARCH_TILEGX;
+#else
+	return AUDIT_ARCH_TILEPRO;
+#endif
+}
+
 #endif /* _ASM_TILE_SYSCALL_H */
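syscall_set_return_value() above widens the return convention from one register to two: R0 keeps the usual value (the negative errno on failure), while R1 carries the positive errno, or zero on success, so a consumer can test for failure without range-checking R0. A hypothetical decoder for this convention (the function name and signature are mine, for illustration):

    /* Return the positive errno if the syscall failed, else 0,
     * storing the successful return value through 'result'. */
    static long tile_syscall_decode(long r0, long r1, long *result)
    {
            if (r1 != 0)
                    return r1;      /* failure: R1 holds the positive errno */
            *result = r0;           /* success: R0 holds the return value */
            return 0;
    }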
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h
index 9e5ce0d7b..b66a693c2 100644
--- a/arch/tile/include/asm/word-at-a-time.h
+++ b/arch/tile/include/asm/word-at-a-time.h
@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
 				     const struct word_at_a_time *c)
 {
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
 #endif
 }
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
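On tile, has_zero() marks each zero byte with 0x01 (via a byte-compare SIMD instruction), so the little-endian zero_bytemask() above keys off the lowest set bit of that mask. A self-contained check of the arithmetic, assuming a 64-bit little-endian host (illustration only):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            /* Little-endian word holding "ab\0" followed by junk bytes. */
            unsigned long val  = 0x2e2e2e2e2e006261ul;
            unsigned long mask = 0x0000000000010000ul; /* 0x01 at the NUL */

            /* (2ul << ctz) - 1 keeps bytes 'a' and 'b' (plus the low bit
             * of the NUL byte, which is zero anyway) and clears the rest. */
            unsigned long keep = (2ul << __builtin_ctzl(mask)) - 1;

            assert(keep == 0x1fffful);
            assert((val & keep) == 0x6261ul);
            printf("masked word: %#lx\n", val & keep);
            return 0;
    }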