Diffstat (limited to 'include/asm-generic')
147 files changed, 11209 insertions, 0 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h new file mode 100644 index 000000000..5bdab6bff --- /dev/null +++ b/include/asm-generic/4level-fixup.h @@ -0,0 +1,38 @@ +#ifndef _4LEVEL_FIXUP_H +#define _4LEVEL_FIXUP_H + +#define __ARCH_HAS_4LEVEL_HACK +#define __PAGETABLE_PUD_FOLDED + +#define PUD_SHIFT PGDIR_SHIFT +#define PUD_SIZE PGDIR_SIZE +#define PUD_MASK PGDIR_MASK +#define PTRS_PER_PUD 1 + +#define pud_t pgd_t + +#define pmd_alloc(mm, pud, address) \ + ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ + NULL: pmd_offset(pud, address)) + +#define pud_alloc(mm, pgd, address) (pgd) +#define pud_offset(pgd, start) (pgd) +#define pud_none(pud) 0 +#define pud_bad(pud) 0 +#define pud_present(pud) 1 +#define pud_ERROR(pud) do { } while (0) +#define pud_clear(pud) pgd_clear(pud) +#define pud_val(pud) pgd_val(pud) +#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) +#define pud_page(pud) pgd_page(pud) +#define pud_page_vaddr(pud) pgd_page_vaddr(pud) + +#undef pud_free_tlb +#define pud_free_tlb(tlb, x, addr) do { } while (0) +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, addr) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm new file mode 100644 index 000000000..d2ee86b4c --- /dev/null +++ b/include/asm-generic/Kbuild.asm @@ -0,0 +1 @@ +include include/uapi/asm-generic/Kbuild.asm diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h new file mode 100644 index 000000000..b7babf020 --- /dev/null +++ b/include/asm-generic/atomic-long.h @@ -0,0 +1,258 @@ +#ifndef _ASM_GENERIC_ATOMIC_LONG_H +#define _ASM_GENERIC_ATOMIC_LONG_H +/* + * Copyright (C) 2005 Silicon Graphics, Inc. + * Christoph Lameter + * + * Allows to provide arch independent atomic definitions without the need to + * edit all arch specific atomic.h files. + */ + +#include <asm/types.h> + +/* + * Suppport for atomic_long_t + * + * Casts for parameters are avoided for existing atomic functions in order to + * avoid issues with cast-as-lval under gcc 4.x and other limitations that the + * macros of a platform may have. 
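With 4level-fixup.h pulled in by a three-level architecture, the pud level folds onto the pgd: pud_alloc() and pud_offset() simply hand back the pgd entry, PTRS_PER_PUD is 1, and pud_none()/pud_bad() can never fire. A minimal sketch of a page-table walk under that assumption (walk_pte() and the final pte_offset_kernel() step are illustrative, not part of this header):

static pte_t *walk_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);	/* folded: still points at the pgd entry */
	if (pud_none(*pud))		/* always false with the fixup above */
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}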
+ */ + +#if BITS_PER_LONG == 64 + +typedef atomic64_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + +static inline long atomic_long_read(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_read(v); +} + +static inline void atomic_long_set(atomic_long_t *l, long i) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_set(v, i); +} + +static inline void atomic_long_inc(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_inc(v); +} + +static inline void atomic_long_dec(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_dec(v); +} + +static inline void atomic_long_add(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_add(i, v); +} + +static inline void atomic_long_sub(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_sub(i, v); +} + +static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_sub_and_test(i, v); +} + +static inline int atomic_long_dec_and_test(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_dec_and_test(v); +} + +static inline int atomic_long_inc_and_test(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_inc_and_test(v); +} + +static inline int atomic_long_add_negative(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_add_negative(i, v); +} + +static inline long atomic_long_add_return(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_add_return(i, v); +} + +static inline long atomic_long_sub_return(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_sub_return(i, v); +} + +static inline long atomic_long_inc_return(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_inc_return(v); +} + +static inline long atomic_long_dec_return(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_dec_return(v); +} + +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_add_unless(v, a, u); +} + +#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) + +#define atomic_long_cmpxchg(l, old, new) \ + (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) +#define atomic_long_xchg(v, new) \ + (atomic64_xchg((atomic64_t *)(v), (new))) + +#else /* BITS_PER_LONG == 64 */ + +typedef atomic_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +static inline long atomic_long_read(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_read(v); +} + +static inline void atomic_long_set(atomic_long_t *l, long i) +{ + atomic_t *v = (atomic_t *)l; + + atomic_set(v, i); +} + +static inline void atomic_long_inc(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_inc(v); +} + +static inline void atomic_long_dec(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_dec(v); +} + +static inline void atomic_long_add(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_add(i, v); +} + +static inline void atomic_long_sub(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_sub(i, v); +} + +static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return atomic_sub_and_test(i, v); +} + +static inline int atomic_long_dec_and_test(atomic_long_t *l) +{ + atomic_t *v 
= (atomic_t *)l; + + return atomic_dec_and_test(v); +} + +static inline int atomic_long_inc_and_test(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return atomic_inc_and_test(v); +} + +static inline int atomic_long_add_negative(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return atomic_add_negative(i, v); +} + +static inline long atomic_long_add_return(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_add_return(i, v); +} + +static inline long atomic_long_sub_return(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_sub_return(i, v); +} + +static inline long atomic_long_inc_return(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_inc_return(v); +} + +static inline long atomic_long_dec_return(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_dec_return(v); +} + +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_add_unless(v, a, u); +} + +#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) + +#define atomic_long_cmpxchg(l, old, new) \ + (atomic_cmpxchg((atomic_t *)(l), (old), (new))) +#define atomic_long_xchg(v, new) \ + (atomic_xchg((atomic_t *)(v), (new))) + +#endif /* BITS_PER_LONG == 64 */ + +#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h new file mode 100644 index 000000000..1973ad2b1 --- /dev/null +++ b/include/asm-generic/atomic.h @@ -0,0 +1,187 @@ +/* + * Generic C implementation of atomic counter operations. Usable on + * UP systems only. Do not include in machine independent code. + * + * Originally implemented for MN10300. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_ATOMIC_H +#define __ASM_GENERIC_ATOMIC_H + +#include <asm/cmpxchg.h> +#include <asm/barrier.h> + +/* + * atomic_$op() - $op integer to atomic variable + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use + * smp_mb__{before,after}_atomic(). + */ + +/* + * atomic_$op_return() - $op interer to atomic variable and returns the result + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does imply a full memory barrier. 
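The point of the atomic-long.h wrappers is that a long-sized counter can be written once and behave identically whether it lands on atomic_t or atomic64_t. A minimal usage sketch (nr_widgets and the helpers are hypothetical):

static atomic_long_t nr_widgets = ATOMIC_LONG_INIT(0);

static void widget_get(void)
{
	atomic_long_inc(&nr_widgets);
}

static int widget_put(void)
{
	/* non-zero when the last reference just went away */
	return atomic_long_dec_and_test(&nr_widgets);
}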
+ */ + +#ifdef CONFIG_SMP + +/* we can build all atomic primitives from cmpxchg */ + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c c_op i; \ +} + +#else + +#include <linux/irqflags.h> + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = (v->counter = v->counter c_op i); \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#endif /* CONFIG_SMP */ + +#ifndef atomic_add_return +ATOMIC_OP_RETURN(add, +) +#endif + +#ifndef atomic_sub_return +ATOMIC_OP_RETURN(sub, -) +#endif + +#ifndef atomic_clear_mask +ATOMIC_OP(and, &) +#define atomic_clear_mask(i, v) atomic_and(~(i), (v)) +#endif + +#ifndef atomic_set_mask +#define CONFIG_ARCH_HAS_ATOMIC_OR +ATOMIC_OP(or, |) +#define atomic_set_mask(i, v) atomic_or((i), (v)) +#endif + +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +#define ATOMIC_INIT(i) { (i) } + +/** + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +#ifndef atomic_read +#define atomic_read(v) ACCESS_ONCE((v)->counter) +#endif + +/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +#define atomic_set(v, i) (((v)->counter) = (i)) + +#include <linux/irqflags.h> + +static inline int atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} + +static inline void atomic_add(int i, atomic_t *v) +{ + atomic_add_return(i, v); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + atomic_sub_return(i, v); +} + +static inline void atomic_inc(atomic_t *v) +{ + atomic_add_return(1, v); +} + +static inline void atomic_dec(atomic_t *v) +{ + atomic_sub_return(1, v); +} + +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_inc_return(v) atomic_add_return(1, (v)) + +#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) +#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) + +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c; +} + +#endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h new file mode 100644 index 000000000..30ad9c86c --- /dev/null +++ b/include/asm-generic/atomic64.h @@ -0,0 +1,54 @@ +/* + * Generic implementation of 64-bit atomics using spinlocks, + * useful on processors that don't have 64-bit atomic instructions. 
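On CONFIG_SMP the ATOMIC_OP()/ATOMIC_OP_RETURN() macros above generate the standard cmpxchg() retry loop. Written out by hand for a hypothetical atomic_min() helper (not an operation this header provides), the pattern is:

static inline void atomic_min(int i, atomic_t *v)
{
	int c, old;

	c = atomic_read(v);
	while (c > i && (old = atomic_cmpxchg(v, c, i)) != c)
		c = old;	/* lost the race; retry against the new value */
}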
+ * + * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_GENERIC_ATOMIC64_H +#define _ASM_GENERIC_ATOMIC64_H + +typedef struct { + long long counter; +} atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +extern long long atomic64_read(const atomic64_t *v); +extern void atomic64_set(atomic64_t *v, long long i); + +#define ATOMIC64_OP(op) \ +extern void atomic64_##op(long long a, atomic64_t *v); + +#define ATOMIC64_OP_RETURN(op) \ +extern long long atomic64_##op##_return(long long a, atomic64_t *v); + +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) + +ATOMIC64_OPS(add) +ATOMIC64_OPS(sub) + +#undef ATOMIC64_OPS +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +extern long long atomic64_dec_if_positive(atomic64_t *v); +extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); +extern long long atomic64_xchg(atomic64_t *v, long long new); +extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); + +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) +#define atomic64_inc(v) atomic64_add(1LL, (v)) +#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) +#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) +#define atomic64_dec(v) atomic64_sub(1LL, (v)) +#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) +#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + +#endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h new file mode 100644 index 000000000..a18655373 --- /dev/null +++ b/include/asm-generic/audit_change_attr.h @@ -0,0 +1,32 @@ +#ifdef __NR_chmod +__NR_chmod, +#endif +__NR_fchmod, +#ifdef __NR_chown +__NR_chown, +__NR_lchown, +#endif +#ifdef __NR_fchown +__NR_fchown, +#endif +__NR_setxattr, +__NR_lsetxattr, +__NR_fsetxattr, +__NR_removexattr, +__NR_lremovexattr, +__NR_fremovexattr, +#ifdef __NR_fchownat +__NR_fchownat, +__NR_fchmodat, +#endif +#ifdef __NR_chown32 +__NR_chown32, +__NR_fchown32, +__NR_lchown32, +#endif +#ifdef __NR_link +__NR_link, +#endif +#ifdef __NR_linkat +__NR_linkat, +#endif diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h new file mode 100644 index 000000000..7b61db4fe --- /dev/null +++ b/include/asm-generic/audit_dir_write.h @@ -0,0 +1,32 @@ +#ifdef __NR_rename +__NR_rename, +#endif +#ifdef __NR_mkdir +__NR_mkdir, +#endif +#ifdef __NR_rmdir +__NR_rmdir, +#endif +#ifdef __NR_creat +__NR_creat, +#endif +#ifdef __NR_link +__NR_link, +#endif +#ifdef __NR_unlink +__NR_unlink, +#endif +#ifdef __NR_symlink +__NR_symlink, +#endif +#ifdef __NR_mknod +__NR_mknod, +#endif +#ifdef __NR_mkdirat +__NR_mkdirat, +__NR_mknodat, +__NR_unlinkat, +__NR_renameat, +__NR_linkat, +__NR_symlinkat, +#endif diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h new file mode 100644 index 000000000..3b249cb85 --- /dev/null +++ b/include/asm-generic/audit_read.h @@ -0,0 +1,13 @@ +#ifdef __NR_readlink +__NR_readlink, +#endif +__NR_quotactl, +__NR_listxattr, +__NR_llistxattr, +__NR_flistxattr, +__NR_getxattr, 
+__NR_lgetxattr, +__NR_fgetxattr, +#ifdef __NR_readlinkat +__NR_readlinkat, +#endif diff --git a/include/asm-generic/audit_signal.h b/include/asm-generic/audit_signal.h new file mode 100644 index 000000000..6feab7f18 --- /dev/null +++ b/include/asm-generic/audit_signal.h @@ -0,0 +1,3 @@ +__NR_kill, +__NR_tgkill, +__NR_tkill, diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h new file mode 100644 index 000000000..274575d71 --- /dev/null +++ b/include/asm-generic/audit_write.h @@ -0,0 +1,21 @@ +#include <asm-generic/audit_dir_write.h> +__NR_acct, +#ifdef __NR_swapon +__NR_swapon, +#endif +__NR_quotactl, +#ifdef __NR_truncate +__NR_truncate, +#endif +#ifdef __NR_truncate64 +__NR_truncate64, +#endif +#ifdef __NR_ftruncate +__NR_ftruncate, +#endif +#ifdef __NR_ftruncate64 +__NR_ftruncate64, +#endif +#ifdef __NR_bind +__NR_bind, /* bind can affect fs object only in one way... */ +#endif diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h new file mode 100644 index 000000000..f5c40b0fa --- /dev/null +++ b/include/asm-generic/barrier.h @@ -0,0 +1,97 @@ +/* + * Generic barrier definitions, originally based on MN10300 definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_BARRIER_H +#define __ASM_GENERIC_BARRIER_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> + +#ifndef nop +#define nop() asm volatile ("nop") +#endif + +/* + * Force strict CPU ordering. And yes, this is required on UP too when we're + * talking to devices. + * + * Fall back to compiler barriers if nothing better is provided. 
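The audit_*.h fragments above are not self-contained headers; arch code includes them in the middle of an array initializer to build its syscall classification tables, terminated by ~0U, roughly like this (the array names are illustrative):

static unsigned dir_write_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};

static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};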
+ */ + +#ifndef mb +#define mb() barrier() +#endif + +#ifndef rmb +#define rmb() mb() +#endif + +#ifndef wmb +#define wmb() mb() +#endif + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef dma_wmb +#define dma_wmb() wmb() +#endif + +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#endif + +#ifndef set_mb +#define set_mb(var, value) do { (var) = (value); mb(); } while (0) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() smp_mb() +#endif + +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ACCESS_ONCE(*p) = (v); \ +} while (0) + +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ + ___p1; \ +}) + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h new file mode 100644 index 000000000..dcdcacf2f --- /dev/null +++ b/include/asm-generic/bitops.h @@ -0,0 +1,37 @@ +#ifndef __ASM_GENERIC_BITOPS_H +#define __ASM_GENERIC_BITOPS_H + +/* + * For the benefit of those who are trying to port Linux to another + * architecture, here are some C-language equivalents. You should + * recode these in the native assembly language, if at all possible. + * + * C language equivalents written by Theodore Ts'o, 9/26/92 + */ + +#include <linux/irqflags.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/find.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/lock.h> + +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> + +#endif /* __ASM_GENERIC_BITOPS_H */ diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h new file mode 100644 index 000000000..937d7c435 --- /dev/null +++ b/include/asm-generic/bitops/__ffs.h @@ -0,0 +1,43 @@ +#ifndef _ASM_GENERIC_BITOPS___FFS_H_ +#define _ASM_GENERIC_BITOPS___FFS_H_ + +#include <asm/types.h> + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. 
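smp_store_release()/smp_load_acquire() above cover the common message-passing case without hand-placed smp_wmb()/smp_rmb() pairs. A minimal producer/consumer sketch (data and ready are hypothetical globals):

static int data;
static int ready;

static void publish(int value)		/* producer */
{
	data = value;
	smp_store_release(&ready, 1);	/* data store ordered before the flag */
}

static int try_consume(int *out)	/* consumer */
{
	if (!smp_load_acquire(&ready))	/* flag load ordered before the data load */
		return 0;
	*out = data;
	return 1;
}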
+ */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + int num = 0; + +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + if ((word & 0x1) == 0) + num += 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h new file mode 100644 index 000000000..a60a7ccb6 --- /dev/null +++ b/include/asm-generic/bitops/__fls.h @@ -0,0 +1,43 @@ +#ifndef _ASM_GENERIC_BITOPS___FLS_H_ +#define _ASM_GENERIC_BITOPS___FLS_H_ + +#include <asm/types.h> + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __fls(unsigned long word) +{ + int num = BITS_PER_LONG - 1; + +#if BITS_PER_LONG == 64 + if (!(word & (~0ul << 32))) { + num -= 32; + word <<= 32; + } +#endif + if (!(word & (~0ul << (BITS_PER_LONG-16)))) { + num -= 16; + word <<= 16; + } + if (!(word & (~0ul << (BITS_PER_LONG-8)))) { + num -= 8; + word <<= 8; + } + if (!(word & (~0ul << (BITS_PER_LONG-4)))) { + num -= 4; + word <<= 4; + } + if (!(word & (~0ul << (BITS_PER_LONG-2)))) { + num -= 2; + word <<= 2; + } + if (!(word & (~0ul << (BITS_PER_LONG-1)))) + num -= 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */ diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h new file mode 100644 index 000000000..6a211f406 --- /dev/null +++ b/include/asm-generic/bitops/arch_hweight.h @@ -0,0 +1,25 @@ +#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ + +#include <asm/types.h> + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return __sw_hweight32(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __sw_hweight16(w); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __sw_hweight8(w); +} + +static inline unsigned long __arch_hweight64(__u64 w) +{ + return __sw_hweight64(w); +} +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h new file mode 100644 index 000000000..49673510b --- /dev/null +++ b/include/asm-generic/bitops/atomic.h @@ -0,0 +1,189 @@ +#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_ATOMIC_H_ + +#include <asm/types.h> +#include <linux/irqflags.h> + +#ifdef CONFIG_SMP +#include <asm/spinlock.h> +#include <asm/cache.h> /* we use L1_CACHE_BYTES */ + +/* Use an array of spinlocks for our atomic_ts. + * Hash function to index into a different SPINLOCK. + * Since "a" is usually an address, use one spinlock per cacheline. 
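Both __ffs() and __fls() are undefined for a zero argument, so callers check first. A small illustrative helper that reports the lowest and highest set bit of a word:

static void describe_word(unsigned long w)
{
	if (!w) {
		printk("no bits set\n");
		return;
	}
	printk("lowest set bit %lu, highest set bit %lu\n",
	       __ffs(w), __fls(w));
}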
+ */ +# define ATOMIC_HASH_SIZE 4 +# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) + +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; + +/* Can't use raw_spin_lock_irq because of #include problems, so + * this is the substitute */ +#define _atomic_spin_lock_irqsave(l,f) do { \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + local_irq_save(f); \ + arch_spin_lock(s); \ +} while(0) + +#define _atomic_spin_unlock_irqrestore(l,f) do { \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spin_unlock(s); \ + local_irq_restore(f); \ +} while(0) + + +#else +# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) +# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) +#endif + +/* + * NMI events can occur at any time, including when interrupts have been + * disabled by *_irqsave(). So you can get NMI events occurring while a + * *_bit function is holding a spin lock. If the NMI handler also wants + * to do bit manipulation (and they do) then you can get a deadlock + * between the original caller of *_bit() and the NMI handler. + * + * by Keith Owens + */ + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * + * Note: there are no guarantees that this function will not be reordered + * on non x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + *p |= mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() + * in order to ensure changes are visible on other processors. + */ +static inline void clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + *p &= ~mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. It may be + * reordered on other architectures than x86. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. 
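Typical consumers of the atomic bit operations above keep a shared flags word or DECLARE_BITMAP() array; the spinlock-hash fallback keeps each read-modify-write atomic on SMP. A minimal sketch (the flag bit and bitmap are hypothetical):

#define WIDGET_BUSY	0

static DECLARE_BITMAP(widget_flags, 32);

static void widget_mark_busy(void)
{
	set_bit(WIDGET_BUSY, widget_flags);
}

static void widget_mark_idle(void)
{
	/* callers relying on ordering add smp_mb__before_atomic()/
	 * smp_mb__after_atomic() around this, as noted above */
	clear_bit(WIDGET_BUSY, widget_flags);
}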
+ */ +static inline void change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + *p ^= mask; + _atomic_spin_unlock_irqrestore(p, flags); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It may be reordered on other architectures than x86. + * It also implies a memory barrier. + */ +static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It can be reorderdered on other architectures other than x86. + * It also implies a memory barrier. + */ +static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old ^ mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} + +#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h new file mode 100644 index 000000000..90041e3a4 --- /dev/null +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -0,0 +1,15 @@ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_ + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + return __builtin_ctzl(word); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h new file mode 100644 index 000000000..0248f3866 --- /dev/null +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -0,0 +1,15 @@ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_ + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. 
+ */ +static __always_inline unsigned long __fls(unsigned long word) +{ + return (sizeof(word) * 8) - 1 - __builtin_clzl(word); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h new file mode 100644 index 000000000..064825829 --- /dev/null +++ b/include/asm-generic/bitops/builtin-ffs.h @@ -0,0 +1,17 @@ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static __always_inline int ffs(int x) +{ + return __builtin_ffs(x); +} + +#endif diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h new file mode 100644 index 000000000..eda652d0a --- /dev/null +++ b/include/asm-generic/bitops/builtin-fls.h @@ -0,0 +1,16 @@ +#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ +#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __always_inline int fls(int x) +{ + return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; +} + +#endif diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h new file mode 100644 index 000000000..0a7e06623 --- /dev/null +++ b/include/asm-generic/bitops/const_hweight.h @@ -0,0 +1,43 @@ +#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ + +/* + * Compile time versions of __arch_hweightN() + */ +#define __const_hweight8(w) \ + ((unsigned int) \ + ((!!((w) & (1ULL << 0))) + \ + (!!((w) & (1ULL << 1))) + \ + (!!((w) & (1ULL << 2))) + \ + (!!((w) & (1ULL << 3))) + \ + (!!((w) & (1ULL << 4))) + \ + (!!((w) & (1ULL << 5))) + \ + (!!((w) & (1ULL << 6))) + \ + (!!((w) & (1ULL << 7))))) + +#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) +#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) +#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) + +/* + * Generic interface. + */ +#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) +#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) +#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) +#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) + +/* + * Interface for known constant arguments + */ +#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) +#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) +#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) +#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) + +/* + * Type invariant interface to the compile time constant hweight functions. 
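hweightN() above folds to a compile-time constant when its argument is constant and otherwise dispatches to __arch_hweightN(). A small illustrative use, counting enabled ports in a mask (the mask value is made up):

#define DEFAULT_PORT_MASK	0x0000000fu	/* hweight32() folds this to 4 */

static unsigned int enabled_ports(u32 mask)
{
	return hweight32(mask);			/* runtime popcount */
}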
+ */ +#define HWEIGHT(w) HWEIGHT64((u64)w) + +#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/count_zeros.h b/include/asm-generic/bitops/count_zeros.h new file mode 100644 index 000000000..97520d21f --- /dev/null +++ b/include/asm-generic/bitops/count_zeros.h @@ -0,0 +1,57 @@ +/* Count leading and trailing zeros functions + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_ +#define _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_ + +#include <asm/bitops.h> + +/** + * count_leading_zeros - Count the number of zeros from the MSB back + * @x: The value + * + * Count the number of leading zeros from the MSB going towards the LSB in @x. + * + * If the MSB of @x is set, the result is 0. + * If only the LSB of @x is set, then the result is BITS_PER_LONG-1. + * If @x is 0 then the result is COUNT_LEADING_ZEROS_0. + */ +static inline int count_leading_zeros(unsigned long x) +{ + if (sizeof(x) == 4) + return BITS_PER_LONG - fls(x); + else + return BITS_PER_LONG - fls64(x); +} + +#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG + +/** + * count_trailing_zeros - Count the number of zeros from the LSB forwards + * @x: The value + * + * Count the number of trailing zeros from the LSB going towards the MSB in @x. + * + * If the LSB of @x is set, the result is 0. + * If only the MSB of @x is set, then the result is BITS_PER_LONG-1. + * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0. + */ +static inline int count_trailing_zeros(unsigned long x) +{ +#define COUNT_TRAILING_ZEROS_0 (-1) + + if (sizeof(x) == 4) + return ffs(x); + else + return (x != 0) ? 
__ffs(x) : COUNT_TRAILING_ZEROS_0; +} + +#endif /* _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_ */ diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h new file mode 100644 index 000000000..5a0997857 --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic-setbit.h @@ -0,0 +1,11 @@ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ + +/* + * Atomic bitops based version of ext2 atomic bitops + */ + +#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr) +#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */ diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h new file mode 100644 index 000000000..87f0f109d --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic.h @@ -0,0 +1,26 @@ +#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ + +/* + * Spinlock based version of ext2 atomic bitops + */ + +#define ext2_set_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = __test_and_set_bit_le(nr, addr); \ + spin_unlock(lock); \ + ret; \ + }) + +#define ext2_clear_bit_atomic(lock, nr, addr) \ + ({ \ + int ret; \ + spin_lock(lock); \ + ret = __test_and_clear_bit_le(nr, addr); \ + spin_unlock(lock); \ + ret; \ + }) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h new file mode 100644 index 000000000..fbbb43af7 --- /dev/null +++ b/include/asm-generic/bitops/ffs.h @@ -0,0 +1,41 @@ +#ifndef _ASM_GENERIC_BITOPS_FFS_H_ +#define _ASM_GENERIC_BITOPS_FFS_H_ + +/** + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static inline int ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h new file mode 100644 index 000000000..6744bd4cd --- /dev/null +++ b/include/asm-generic/bitops/ffz.h @@ -0,0 +1,12 @@ +#ifndef _ASM_GENERIC_BITOPS_FFZ_H_ +#define _ASM_GENERIC_BITOPS_FFZ_H_ + +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +#define ffz(x) __ffs(~(x)) + +#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 000000000..998d4d544 --- /dev/null +++ b/include/asm-generic/bitops/find.h @@ -0,0 +1,62 @@ +#ifndef _ASM_GENERIC_BITOPS_FIND_H_ +#define _ASM_GENERIC_BITOPS_FIND_H_ + +#ifndef find_next_bit +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. 
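find_first_bit()/find_next_bit() are normally consumed through the standard iteration idiom: start at the first set bit and keep asking for the next one until the returned index reaches the bitmap size. A sketch (the helper name is illustrative):

static void print_present_widgets(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		printk("widget %lu is present\n", bit);
}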
+ */ +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +#endif + +#ifndef find_next_zero_bit +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number of the next zero bit + * If no bits are zero, returns @size. + */ +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +#endif + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum number of bits to search + * + * Returns the bit number of the first set bit. + * If no bits are set, returns @size. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum number of bits to search + * + * Returns the bit number of the first cleared bit. + * If no bits are zero, returns @size. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) + +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h new file mode 100644 index 000000000..0576d1f42 --- /dev/null +++ b/include/asm-generic/bitops/fls.h @@ -0,0 +1,41 @@ +#ifndef _ASM_GENERIC_BITOPS_FLS_H_ +#define _ASM_GENERIC_BITOPS_FLS_H_ + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ + +static __always_inline int fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h new file mode 100644 index 000000000..b097cf844 --- /dev/null +++ b/include/asm-generic/bitops/fls64.h @@ -0,0 +1,36 @@ +#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ +#define _ASM_GENERIC_BITOPS_FLS64_H_ + +#include <asm/types.h> + +/** + * fls64 - find last set bit in a 64-bit word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffsll, but returns the position of the most significant set bit. + * + * fls64(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 64. 
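Because fls() is 1-based and fls(0) == 0, it doubles as the building block for integer log2 and power-of-two rounding. A sketch that rounds a size up to the next power of two, assuming x is nonzero and below 2^31 (the helper is illustrative):

static unsigned int roundup_pow2(unsigned int x)
{
	/* x == 1 gives fls(0) == 0, i.e. 1 << 0 == 1 */
	return 1u << fls(x - 1);
}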
+ */ +#if BITS_PER_LONG == 32 +static __always_inline int fls64(__u64 x) +{ + __u32 h = x >> 32; + if (h) + return fls(h) + 32; + return fls(x); +} +#elif BITS_PER_LONG == 64 +static __always_inline int fls64(__u64 x) +{ + if (x == 0) + return 0; + return __fls(x) + 1; +} +#else +#error BITS_PER_LONG not 32 or 64 +#endif + +#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 000000000..a94d6519c --- /dev/null +++ b/include/asm-generic/bitops/hweight.h @@ -0,0 +1,7 @@ +#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ + +#include <asm-generic/bitops/arch_hweight.h> +#include <asm-generic/bitops/const_hweight.h> + +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 000000000..61731543c --- /dev/null +++ b/include/asm-generic/bitops/le.h @@ -0,0 +1,97 @@ +#ifndef _ASM_GENERIC_BITOPS_LE_H_ +#define _ASM_GENERIC_BITOPS_LE_H_ + +#include <asm/types.h> +#include <asm/byteorder.h> + +#if defined(__LITTLE_ENDIAN) + +#define BITOP_LE_SWIZZLE 0 + +static inline unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_zero_bit(addr, size, offset); +} + +static inline unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_bit(addr, size, offset); +} + +static inline unsigned long find_first_zero_bit_le(const void *addr, + unsigned long size) +{ + return find_first_zero_bit(addr, size); +} + +#elif defined(__BIG_ENDIAN) + +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +#ifndef find_next_zero_bit_le +extern unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset); +#endif + +#ifndef find_next_bit_le +extern unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset); +#endif + +#ifndef find_first_zero_bit_le +#define find_first_zero_bit_le(addr, size) \ + find_next_zero_bit_le((addr), (size), 0) +#endif + +#else +#error "Please fix <asm/byteorder.h>" +#endif + +static inline int test_bit_le(int nr, const void *addr) +{ + return test_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void set_bit_le(int nr, void *addr) +{ + set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void clear_bit_le(int nr, void *addr) +{ + clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __set_bit_le(int nr, void *addr) +{ + __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline void __clear_bit_le(int nr, void *addr) +{ + __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_set_bit_le(int nr, void *addr) +{ + return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int test_and_clear_bit_le(int nr, void *addr) +{ + return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_set_bit_le(int nr, void *addr) +{ + return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +static inline int __test_and_clear_bit_le(int nr, void *addr) +{ + return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); +} + +#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h new file mode 100644 index 000000000..c30266e94 --- /dev/null +++ b/include/asm-generic/bitops/lock.h @@ -0,0 +1,45 @@ +#ifndef _ASM_GENERIC_BITOPS_LOCK_H_ +#define 
_ASM_GENERIC_BITOPS_LOCK_H_ + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics. + * It can be used to implement bit locks. + */ +#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr) + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + */ +#define clear_bit_unlock(nr, addr) \ +do { \ + smp_mb__before_atomic(); \ + clear_bit(nr, addr); \ +} while (0) + +/** + * __clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is like clear_bit_unlock, however it is not atomic. + * It does provide release barrier semantics so it can be used to unlock + * a bit lock, however it would only be used if no other CPU can modify + * any bits in the memory until the lock is released (a good example is + * if the bit lock itself protects access to the other bits in the word). + */ +#define __clear_bit_unlock(nr, addr) \ +do { \ + smp_mb(); \ + __clear_bit(nr, addr); \ +} while (0) + +#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ + diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h new file mode 100644 index 000000000..697cc2b7e --- /dev/null +++ b/include/asm-generic/bitops/non-atomic.h @@ -0,0 +1,108 @@ +#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ + +#include <asm/types.h> + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p ^= mask; +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
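test_and_set_bit_lock() and clear_bit_unlock() above are sufficient to build a simple bit spinlock out of one bit of a word. A minimal sketch (the lock bit and state word are hypothetical):

#define MY_LOCK_BIT	0

static unsigned long my_state;

static void my_lock(void)
{
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_state))
		cpu_relax();	/* keep trying until the old bit value was 0 */
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_state);
}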
+ */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h new file mode 100644 index 000000000..604fab703 --- /dev/null +++ b/include/asm-generic/bitops/sched.h @@ -0,0 +1,31 @@ +#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ +#define _ASM_GENERIC_BITOPS_SCHED_H_ + +#include <linux/compiler.h> /* unlikely() */ +#include <asm/types.h> + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 100-bit bitmap. It's guaranteed that at least + * one of the 100 bits is cleared. + */ +static inline int sched_find_first_bit(const unsigned long *b) +{ +#if BITS_PER_LONG == 64 + if (b[0]) + return __ffs(b[0]); + return __ffs(b[1]) + 64; +#elif BITS_PER_LONG == 32 + if (b[0]) + return __ffs(b[0]); + if (b[1]) + return __ffs(b[1]) + 32; + if (b[2]) + return __ffs(b[2]) + 64; + return __ffs(b[3]) + 96; +#else +#error BITS_PER_LONG not defined +#endif +} + +#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h new file mode 100644 index 000000000..d1d70aa19 --- /dev/null +++ b/include/asm-generic/bitsperlong.h @@ -0,0 +1,25 @@ +#ifndef __ASM_GENERIC_BITS_PER_LONG +#define __ASM_GENERIC_BITS_PER_LONG + +#include <uapi/asm-generic/bitsperlong.h> + + +#ifdef CONFIG_64BIT +#define BITS_PER_LONG 64 +#else +#define BITS_PER_LONG 32 +#endif /* CONFIG_64BIT */ + +/* + * FIXME: The check currently breaks x86-64 build, so it's + * temporarily disabled. Please fix x86-64 and reenable + */ +#if 0 && BITS_PER_LONG != __BITS_PER_LONG +#error Inconsistent word size. 
Check asm/bitsperlong.h +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#endif /* __ASM_GENERIC_BITS_PER_LONG */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h new file mode 100644 index 000000000..630dd2372 --- /dev/null +++ b/include/asm-generic/bug.h @@ -0,0 +1,211 @@ +#ifndef _ASM_GENERIC_BUG_H +#define _ASM_GENERIC_BUG_H + +#include <linux/compiler.h> + +#ifdef CONFIG_GENERIC_BUG +#define BUGFLAG_WARNING (1 << 0) +#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8)) +#define BUG_GET_TAINT(bug) ((bug)->flags >> 8) +#endif + +#ifndef __ASSEMBLY__ +#include <linux/kernel.h> + +#ifdef CONFIG_BUG + +#ifdef CONFIG_GENERIC_BUG +struct bug_entry { +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + unsigned long bug_addr; +#else + signed int bug_addr_disp; +#endif +#ifdef CONFIG_DEBUG_BUGVERBOSE +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + const char *file; +#else + signed int file_disp; +#endif + unsigned short line; +#endif + unsigned short flags; +}; +#endif /* CONFIG_GENERIC_BUG */ + +/* + * Don't use BUG() or BUG_ON() unless there's really no way out; one + * example might be detecting data structure corruption in the middle + * of an operation that can't be backed out of. If the (sub)system + * can somehow continue operating, perhaps with reduced functionality, + * it's probably not BUG-worthy. + * + * If you're tempted to BUG(), think again: is completely giving up + * really the *only* solution? There are usually better options, where + * users don't need to reboot ASAP and can mostly shut down cleanly. + */ +#ifndef HAVE_ARCH_BUG +#define BUG() do { \ + printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + panic("BUG!"); \ +} while (0) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) +#endif + +/* + * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report + * significant issues that need prompt attention if they should ever + * appear at runtime. Use the versions with printk format strings + * to provide better diagnostics. + */ +#ifndef __WARN_TAINT +extern __printf(3, 4) +void warn_slowpath_fmt(const char *file, const int line, + const char *fmt, ...); +extern __printf(4, 5) +void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, + const char *fmt, ...); +extern void warn_slowpath_null(const char *file, const int line); +#define WANT_WARN_ON_SLOWPATH +#define __WARN() warn_slowpath_null(__FILE__, __LINE__) +#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) +#define __WARN_printf_taint(taint, arg...) \ + warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) +#else +#define __WARN() __WARN_TAINT(TAINT_WARN) +#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) +#define __WARN_printf_taint(taint, arg...) \ + do { printk(arg); __WARN_TAINT(taint); } while (0) +#endif + +#ifndef WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_TAINT(condition, taint, format...) 
({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf_taint(taint, format); \ + unlikely(__ret_warn_on); \ +}) + +#define WARN_ON_ONCE(condition) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN_ON(!__warned)) \ + __warned = true; \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_ONCE(condition, format...) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN(!__warned, format)) \ + __warned = true; \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN_TAINT(!__warned, taint, format)) \ + __warned = true; \ + unlikely(__ret_warn_once); \ +}) + +#else /* !CONFIG_BUG */ +#ifndef HAVE_ARCH_BUG +#define BUG() do {} while (1) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (condition) ; } while (0) +#endif + +#ifndef HAVE_ARCH_WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + no_printk(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_ON_ONCE(condition) WARN_ON(condition) +#define WARN_ONCE(condition, format...) WARN(condition, format) +#define WARN_TAINT(condition, taint, format...) WARN(condition, format) +#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format) + +#endif + +/* + * WARN_ON_SMP() is for cases that the warning is either + * meaningless for !SMP or may even cause failures. + * This is usually used for cases that we have + * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked() + * returns 0 for uniprocessor settings. + * It can also be used with values that are only defined + * on SMP: + * + * struct foo { + * [...] + * #ifdef CONFIG_SMP + * int bar; + * #endif + * }; + * + * void func(struct foo *zoot) + * { + * WARN_ON_SMP(!zoot->bar); + * + * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(), + * and should be a nop and return false for uniprocessor. + * + * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set + * and x is true. + */ +#ifdef CONFIG_SMP +# define WARN_ON_SMP(x) WARN_ON(x) +#else +/* + * Use of ({0;}) because WARN_ON_SMP(x) may be used either as + * a stand alone line statement or as a condition in an if () + * statement. + * A simple "0" would cause gcc to give a "statement has no effect" + * warning. + */ +# define WARN_ON_SMP(x) ({0;}) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h new file mode 100644 index 000000000..6c4f62ea7 --- /dev/null +++ b/include/asm-generic/bugs.h @@ -0,0 +1,10 @@ +#ifndef __ASM_GENERIC_BUGS_H +#define __ASM_GENERIC_BUGS_H +/* + * This file is included by 'init/main.c' to check for + * architecture-dependent bugs. 
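In practice the WARN family above flags "should not happen" states while letting the caller recover, in contrast to BUG_ON(). A sketch (the limit and helper are hypothetical):

#define WIDGET_MAX_LEN	256

static unsigned int clamp_widget_len(unsigned int len)
{
	/* complain once, then carry on with a clamped value */
	if (WARN_ONCE(len > WIDGET_MAX_LEN, "widget length %u too large\n", len))
		return WIDGET_MAX_LEN;
	return len;
}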
+ */ + +static inline void check_bugs(void) { } + +#endif /* __ASM_GENERIC_BUGS_H */ diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h new file mode 100644 index 000000000..1bfcfe5c2 --- /dev/null +++ b/include/asm-generic/cache.h @@ -0,0 +1,12 @@ +#ifndef __ASM_GENERIC_CACHE_H +#define __ASM_GENERIC_CACHE_H +/* + * 32 bytes appears to be the most common cache line size, + * so make that the default here. Architectures with larger + * cache lines need to provide their own cache.h. + */ + +#define L1_CACHE_SHIFT 5 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif /* __ASM_GENERIC_CACHE_H */ diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h new file mode 100644 index 000000000..87bc536cc --- /dev/null +++ b/include/asm-generic/cacheflush.h @@ -0,0 +1,34 @@ +#ifndef __ASM_CACHEFLUSH_H +#define __ASM_CACHEFLUSH_H + +/* Keep includes the same across arches. */ +#include <linux/mm.h> + +/* + * The cache doesn't need to be flushed when TLB entries change when + * the cache is mapped to physical memory, not virtual memory + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_range(start, end) do { } while (0) +#define flush_icache_page(vma,pg) do { } while (0) +#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + flush_icache_user_range(vma, page, vaddr, len); \ + } while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __ASM_CACHEFLUSH_H */ diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h new file mode 100644 index 000000000..59811df58 --- /dev/null +++ b/include/asm-generic/checksum.h @@ -0,0 +1,87 @@ +#ifndef __ASM_GENERIC_CHECKSUM_H +#define __ASM_GENERIC_CHECKSUM_H + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); + +/* + * the same as csum_partial_copy, but copies from user space. 
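+ * A caller-side sketch (variable names illustrative, error handling
+ * elided); the 32-bit partial sum is folded to 16 bits at the end:
+ *	wsum = csum_partial_copy_from_user(ubuf, kbuf, len, 0, &err);
+ *	check = csum_fold(wsum);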
+ * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *csum_err); + +#ifndef csum_partial_copy_nocheck +#define csum_partial_copy_nocheck(src, dst, len, sum) \ + csum_partial_copy((src), (dst), (len), (sum)) +#endif + +#ifndef ip_fast_csum +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); +#endif + +#ifndef csum_fold +/* + * Fold a partial checksum + */ +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} +#endif + +#ifndef csum_tcpudp_nofold +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +extern __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum); +#endif + +#ifndef csum_tcpudp_magic +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} +#endif + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +extern __sum16 ip_compute_csum(const void *buff, int len); + +#endif /* __ASM_GENERIC_CHECKSUM_H */ diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h new file mode 100644 index 000000000..4ff334749 --- /dev/null +++ b/include/asm-generic/clkdev.h @@ -0,0 +1,30 @@ +/* + * include/asm-generic/clkdev.h + * + * Based on the ARM clkdev.h: + * Copyright (C) 2008 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Helper for the clk API to assist looking up a struct clk. + */ +#ifndef __ASM_CLKDEV_H +#define __ASM_CLKDEV_H + +#include <linux/slab.h> + +#ifndef CONFIG_COMMON_CLK +struct clk; + +static inline int __clk_get(struct clk *clk) { return 1; } +static inline void __clk_put(struct clk *clk) { } +#endif + +static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) +{ + return kzalloc(size, GFP_KERNEL); +} + +#endif diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h new file mode 100644 index 000000000..70bef7891 --- /dev/null +++ b/include/asm-generic/cmpxchg-local.h @@ -0,0 +1,67 @@ +#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H +#define __ASM_GENERIC_CMPXCHG_LOCAL_H + +#include <linux/types.h> +#include <linux/irqflags.h> + +extern unsigned long wrong_size_cmpxchg(volatile void *ptr) + __noreturn; + +/* + * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned + * long parameter, supporting various types of architectures. + */ +static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, + unsigned long old, unsigned long new, int size) +{ + unsigned long flags, prev; + + /* + * Sanity checking, compile-time. 
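+ * An 8-byte cmpxchg on a 32-bit kernel cannot be emulated with the
+ * plain loads and stores below, so such callers are diverted to
+ * wrong_size_cmpxchg() and fail to build.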
+ */ + if (size == 8 && sizeof(unsigned long) != 8) + wrong_size_cmpxchg(ptr); + + raw_local_irq_save(flags); + switch (size) { + case 1: prev = *(u8 *)ptr; + if (prev == old) + *(u8 *)ptr = (u8)new; + break; + case 2: prev = *(u16 *)ptr; + if (prev == old) + *(u16 *)ptr = (u16)new; + break; + case 4: prev = *(u32 *)ptr; + if (prev == old) + *(u32 *)ptr = (u32)new; + break; + case 8: prev = *(u64 *)ptr; + if (prev == old) + *(u64 *)ptr = (u64)new; + break; + default: + wrong_size_cmpxchg(ptr); + } + raw_local_irq_restore(flags); + return prev; +} + +/* + * Generic version of __cmpxchg64_local. Takes an u64 parameter. + */ +static inline u64 __cmpxchg64_local_generic(volatile void *ptr, + u64 old, u64 new) +{ + u64 prev; + unsigned long flags; + + raw_local_irq_save(flags); + prev = *(u64 *)ptr; + if (prev == old) + *(u64 *)ptr = new; + raw_local_irq_restore(flags); + return prev; +} + +#endif diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h new file mode 100644 index 000000000..811fb1e9b --- /dev/null +++ b/include/asm-generic/cmpxchg.h @@ -0,0 +1,108 @@ +/* + * Generic UP xchg and cmpxchg using interrupt disablement. Does not + * support SMP. + */ + +#ifndef __ASM_GENERIC_CMPXCHG_H +#define __ASM_GENERIC_CMPXCHG_H + +#ifdef CONFIG_SMP +#error "Cannot use generic cmpxchg on SMP" +#endif + +#include <linux/types.h> +#include <linux/irqflags.h> + +#ifndef xchg + +/* + * This function doesn't exist, so you'll get a linker error if + * something tries to do an invalidly-sized xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static inline +unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long ret, flags; + + switch (size) { + case 1: +#ifdef __xchg_u8 + return __xchg_u8(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u8 *)ptr; + *(volatile u8 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u8 */ + + case 2: +#ifdef __xchg_u16 + return __xchg_u16(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u16 *)ptr; + *(volatile u16 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u16 */ + + case 4: +#ifdef __xchg_u32 + return __xchg_u32(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u32 *)ptr; + *(volatile u32 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u32 */ + +#ifdef CONFIG_64BIT + case 8: +#ifdef __xchg_u64 + return __xchg_u64(x, ptr); +#else + local_irq_save(flags); + ret = *(volatile u64 *)ptr; + *(volatile u64 *)ptr = x; + local_irq_restore(flags); + return ret; +#endif /* __xchg_u64 */ +#endif /* CONFIG_64BIT */ + + default: + __xchg_called_with_bad_pointer(); + return x; + } +} + +#define xchg(ptr, x) \ + ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +#endif /* xchg */ + +/* + * Atomic compare and exchange. + * + * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether + * a cmpxchg primitive faster than repeated local irq save/restore exists. 
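+ *
+ * Caller-side sketch of the usual retry loop (not part of this header;
+ * variable names are illustrative):
+ *	do {
+ *		old = counter;
+ *	} while (cmpxchg(&counter, old, old + 1) != old);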
+ */ +#include <asm-generic/cmpxchg-local.h> + +#ifndef cmpxchg_local +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#endif + +#ifndef cmpxchg64_local +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + +#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) + +#endif /* __ASM_GENERIC_CMPXCHG_H */ diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h new file mode 100644 index 000000000..51969436b --- /dev/null +++ b/include/asm-generic/cputime.h @@ -0,0 +1,15 @@ +#ifndef _ASM_GENERIC_CPUTIME_H +#define _ASM_GENERIC_CPUTIME_H + +#include <linux/time.h> +#include <linux/jiffies.h> + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING +# include <asm-generic/cputime_jiffies.h> +#endif + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +# include <asm-generic/cputime_nsecs.h> +#endif + +#endif diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h new file mode 100644 index 000000000..fe386fc6e --- /dev/null +++ b/include/asm-generic/cputime_jiffies.h @@ -0,0 +1,76 @@ +#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H +#define _ASM_GENERIC_CPUTIME_JIFFIES_H + +typedef unsigned long __nocast cputime_t; + +#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new) + +#define cputime_one_jiffy jiffies_to_cputime(1) +#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) + +typedef u64 __nocast cputime64_t; + +#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) +#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) + + +/* + * Convert nanoseconds <-> cputime + */ +#define cputime_to_nsecs(__ct) \ + jiffies_to_nsecs(cputime_to_jiffies(__ct)) +#define nsecs_to_cputime64(__nsec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec)) +#define nsecs_to_cputime(__nsec) \ + jiffies_to_cputime(nsecs_to_jiffies(__nsec)) + + +/* + * Convert cputime to microseconds and back. + */ +#define cputime_to_usecs(__ct) \ + jiffies_to_usecs(cputime_to_jiffies(__ct)) +#define usecs_to_cputime(__usec) \ + jiffies_to_cputime(usecs_to_jiffies(__usec)) +#define usecs_to_cputime64(__usec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) + +/* + * Convert cputime to seconds and back. + */ +#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) +#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) + +/* + * Convert cputime to timespec and back. + */ +#define timespec_to_cputime(__val) \ + jiffies_to_cputime(timespec_to_jiffies(__val)) +#define cputime_to_timespec(__ct,__val) \ + jiffies_to_timespec(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to timeval and back. + */ +#define timeval_to_cputime(__val) \ + jiffies_to_cputime(timeval_to_jiffies(__val)) +#define cputime_to_timeval(__ct,__val) \ + jiffies_to_timeval(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to clock and back. + */ +#define cputime_to_clock_t(__ct) \ + jiffies_to_clock_t(cputime_to_jiffies(__ct)) +#define clock_t_to_cputime(__x) \ + jiffies_to_cputime(clock_t_to_jiffies(__x)) + +/* + * Convert cputime64 to clock. 
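+ * (For example, one second of cputime64, i.e. HZ jiffies, reads back
+ * as USER_HZ clock_t ticks.)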
+ */ +#define cputime64_to_clock_t(__ct) \ + jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) + +#endif diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h new file mode 100644 index 000000000..041948589 --- /dev/null +++ b/include/asm-generic/cputime_nsecs.h @@ -0,0 +1,119 @@ +/* + * Definitions for measuring cputime in nsecs resolution. + * + * Based on <arch/ia64/include/asm/cputime.h> + * + * Copyright (C) 2007 FUJITSU LIMITED + * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#ifndef _ASM_GENERIC_CPUTIME_NSECS_H +#define _ASM_GENERIC_CPUTIME_NSECS_H + +#include <linux/math64.h> + +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; + +#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) + +#define cputime_one_jiffy jiffies_to_cputime(1) + +#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor) +#define cputime_div_rem(__ct, divisor, remainder) \ + div_u64_rem((__force u64)__ct, divisor, remainder); + +/* + * Convert cputime <-> jiffies (HZ) + */ +#define cputime_to_jiffies(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__jif) \ + (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) \ + cputime_div(__ct, NSEC_PER_SEC / HZ) +#define jiffies64_to_cputime64(__jif) \ + (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) + + +/* + * Convert cputime <-> nanoseconds + */ +#define cputime_to_nsecs(__ct) \ + (__force u64)(__ct) +#define nsecs_to_cputime(__nsecs) \ + (__force cputime_t)(__nsecs) + + +/* + * Convert cputime <-> microseconds + */ +#define cputime_to_usecs(__ct) \ + cputime_div(__ct, NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) \ + (__force cputime_t)((__usecs) * NSEC_PER_USEC) +#define usecs_to_cputime64(__usecs) \ + (__force cputime64_t)((__usecs) * NSEC_PER_USEC) + +/* + * Convert cputime <-> seconds + */ +#define cputime_to_secs(__ct) \ + cputime_div(__ct, NSEC_PER_SEC) +#define secs_to_cputime(__secs) \ + (__force cputime_t)((__secs) * NSEC_PER_SEC) + +/* + * Convert cputime <-> timespec (nsec) + */ +static inline cputime_t timespec_to_cputime(const struct timespec *val) +{ + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; +} +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_nsec = rem; +} + +/* + * Convert cputime <-> timeval (msec) + */ +static inline cputime_t timeval_to_cputime(const struct timeval *val) +{ + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; +} +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) +{ + u32 rem; + + val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); + val->tv_usec = rem / NSEC_PER_USEC; +} + +/* + * Convert cputime <-> clock (USER_HZ) + */ +#define cputime_to_clock_t(__ct) \ + cputime_div(__ct, (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) \ + (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) + +/* + * Convert cputime64 to clock. 
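+ * (Here cputime is kept in nanoseconds, so one second of cputime64,
+ * i.e. NSEC_PER_SEC, reads back as USER_HZ clock_t ticks.)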
+ */ +#define cputime64_to_clock_t(__ct) \ + cputime_to_clock_t((__force cputime_t)__ct) + +#endif diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h new file mode 100644 index 000000000..5e86f6ae7 --- /dev/null +++ b/include/asm-generic/current.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_CURRENT_H +#define __ASM_GENERIC_CURRENT_H + +#include <linux/thread_info.h> + +#define get_current() (current_thread_info()->task) +#define current get_current() + +#endif /* __ASM_GENERIC_CURRENT_H */ diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h new file mode 100644 index 000000000..0f79054ce --- /dev/null +++ b/include/asm-generic/delay.h @@ -0,0 +1,44 @@ +#ifndef __ASM_GENERIC_DELAY_H +#define __ASM_GENERIC_DELAY_H + +/* Undefined functions to get compile-time errors */ +extern void __bad_udelay(void); +extern void __bad_ndelay(void); + +extern void __udelay(unsigned long usecs); +extern void __ndelay(unsigned long nsecs); +extern void __const_udelay(unsigned long xloops); +extern void __delay(unsigned long loops); + +/* + * The weird n/20000 thing suppresses a "comparison is always false due to + * limited range of data type" warning with non-const 8-bit arguments. + */ + +/* 0x10c7 is 2**32 / 1000000 (rounded up) */ +#define udelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / 20000 >= 1) \ + __bad_udelay(); \ + else \ + __const_udelay((n) * 0x10c7ul); \ + } else { \ + __udelay(n); \ + } \ + }) + +/* 0x5 is 2**32 / 1000000000 (rounded up) */ +#define ndelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / 20000 >= 1) \ + __bad_ndelay(); \ + else \ + __const_udelay((n) * 5ul); \ + } else { \ + __ndelay(n); \ + } \ + }) + +#endif /* __ASM_GENERIC_DELAY_H */ diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h new file mode 100644 index 000000000..d7c76bba6 --- /dev/null +++ b/include/asm-generic/device.h @@ -0,0 +1,15 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_GENERIC_DEVICE_H +#define _ASM_GENERIC_DEVICE_H + +struct dev_archdata { +}; + +struct pdev_archdata { +}; + +#endif /* _ASM_GENERIC_DEVICE_H */ diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h new file mode 100644 index 000000000..8f4e31933 --- /dev/null +++ b/include/asm-generic/div64.h @@ -0,0 +1,58 @@ +#ifndef _ASM_GENERIC_DIV64_H +#define _ASM_GENERIC_DIV64_H +/* + * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com> + * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h + * + * The semantics of do_div() are: + * + * uint32_t do_div(uint64_t *n, uint32_t base) + * { + * uint32_t remainder = *n % base; + * *n = *n / base; + * return remainder; + * } + * + * NOTE: macro parameter n is evaluated multiple times, + * beware of side effects! 
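+ *
+ * Worked example (values illustrative):
+ *	u64 ns = 1000000123;
+ *	u32 rem = do_div(ns, 1000000);
+ * leaves ns == 1000 and rem == 123.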
+ */ + +#include <linux/types.h> +#include <linux/compiler.h> + +#if BITS_PER_LONG == 64 + +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) + +#elif BITS_PER_LONG == 32 + +extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); + +/* The unnecessary pointer compare is there + * to check for type safety (n must be 64bit) + */ +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = __div64_32(&(n), __base); \ + __rem; \ + }) + +#else /* BITS_PER_LONG == ?? */ + +# error do_div() does not yet support the C64 + +#endif /* BITS_PER_LONG */ + +#endif /* _ASM_GENERIC_DIV64_H */ diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h new file mode 100644 index 000000000..0297e5875 --- /dev/null +++ b/include/asm-generic/dma-coherent.h @@ -0,0 +1,32 @@ +#ifndef DMA_COHERENT_H +#define DMA_COHERENT_H + +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT +/* + * These three functions are only for dma allocator. + * Don't use them in device drivers. + */ +int dma_alloc_from_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret); +int dma_release_from_coherent(struct device *dev, int order, void *vaddr); + +int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, size_t size, int *ret); +/* + * Standard interface + */ +#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags); + +void dma_release_declared_memory(struct device *dev); + +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); +#else +#define dma_alloc_from_coherent(dev, size, handle, ret) (0) +#define dma_release_from_coherent(dev, order, vaddr) (0) +#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) +#endif + +#endif diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h new file mode 100644 index 000000000..292c57175 --- /dev/null +++ b/include/asm-generic/dma-contiguous.h @@ -0,0 +1,9 @@ +#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H +#define _ASM_GENERIC_DMA_CONTIGUOUS_H + +#include <linux/types.h> + +static inline void +dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { } + +#endif diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h new file mode 100644 index 000000000..6c32af918 --- /dev/null +++ b/include/asm-generic/dma-mapping-broken.h @@ -0,0 +1,95 @@ +#ifndef _ASM_GENERIC_DMA_MAPPING_H +#define _ASM_GENERIC_DMA_MAPPING_H + +/* define the dma api to allow compilation but not linking of + * dma dependent code. 
Code that depends on the dma-mapping + * API needs to set 'depends on HAS_DMA' in its Kconfig + */ + +struct scatterlist; + +extern void * +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag); + +extern void +dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle); + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + /* attrs is not supported and ignored */ + return dma_alloc_coherent(dev, size, dma_handle, flag); +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + /* attrs is not supported and ignored */ + dma_free_coherent(dev, size, cpu_addr, dma_handle); +} + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +extern dma_addr_t +dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction); + +extern void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction); + +extern int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); + +extern void +dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, + enum dma_data_direction direction); + +extern dma_addr_t +dma_map_page(struct device *dev, struct page *page, unsigned long offset, + size_t size, enum dma_data_direction direction); + +extern void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction); + +extern void +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction); + +extern void +dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction); + +extern void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction); + +#define dma_sync_single_for_device dma_sync_single_for_cpu +#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu +#define dma_sync_sg_for_device dma_sync_sg_for_cpu + +extern int +dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + +extern int +dma_supported(struct device *dev, u64 mask); + +extern int +dma_set_mask(struct device *dev, u64 mask); + +extern int +dma_get_cache_alignment(void); + +extern void +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); + +#endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h new file mode 100644 index 000000000..940d5ec12 --- /dev/null +++ b/include/asm-generic/dma-mapping-common.h @@ -0,0 +1,240 @@ +#ifndef _ASM_GENERIC_DMA_MAPPING_H +#define _ASM_GENERIC_DMA_MAPPING_H + +#include <linux/kmemcheck.h> +#include <linux/bug.h> +#include <linux/scatterlist.h> +#include <linux/dma-debug.h> +#include <linux/dma-attrs.h> + +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(ptr, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, virt_to_page(ptr), + (unsigned 
long)ptr & ~PAGE_MASK, size, + dir, attrs); + debug_dma_map_page(dev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, addr, true); + return addr; +} + +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir, true); +} + +/* + * dma_maps_sg_attrs returns 0 on error and > 0 on success. + * It should never return a value < 0. + */ +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + int i, ents; + struct scatterlist *s; + + for_each_sg(sg, s, nents, i) + kmemcheck_mark_initialized(sg_virt(s), s->length); + BUG_ON(!valid_dma_direction(dir)); + ents = ops->map_sg(dev, sg, nents, dir, attrs); + BUG_ON(ents < 0); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} + +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, page, offset, size, dir, NULL); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; +} + +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, NULL); + debug_dma_unmap_page(dev, addr, size, dir, false); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t addr, + unsigned long offset, + 
size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr + offset, size, dir); + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); +} + +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); + +} + +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) + +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + +/** + * dma_mmap_attrs - map a coherent DMA allocation into user space + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices + * @vma: vm_area_struct describing requested user mapping + * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs + * @handle: device-view address returned from dma_alloc_attrs + * @size: size of memory originally requested in dma_alloc_attrs + * @attrs: attributes of mapping properties requested in dma_alloc_attrs + * + * Map a coherent DMA buffer previously allocated by dma_alloc_attrs + * into user space. The coherent DMA buffer must not be freed by the + * driver until the user space mapping has been released. 
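+ *
+ * Driver-side sketch (names illustrative): from an mmap() file
+ * operation one would typically just do
+ *	return dma_mmap_attrs(dev, vma, cpu_addr, dma_handle, size, NULL);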
+ */ +static inline int +dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->mmap) + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) + +int +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +static inline int +dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->get_sgtable) + return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) + +#endif diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h new file mode 100644 index 000000000..9dfc3a7f3 --- /dev/null +++ b/include/asm-generic/dma.h @@ -0,0 +1,15 @@ +#ifndef __ASM_GENERIC_DMA_H +#define __ASM_GENERIC_DMA_H +/* + * This file traditionally describes the i8237 PC style DMA controller. + * Most architectures don't have these any more and can get the minimal + * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS. + * + * Some code relies on seeing MAX_DMA_ADDRESS though. + */ +#define MAX_DMA_ADDRESS PAGE_OFFSET + +extern int request_dma(unsigned int dmanr, const char *device_id); +extern void free_dma(unsigned int dmanr); + +#endif /* __ASM_GENERIC_DMA_H */ diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h new file mode 100644 index 000000000..a5de55c04 --- /dev/null +++ b/include/asm-generic/early_ioremap.h @@ -0,0 +1,42 @@ +#ifndef _ASM_EARLY_IOREMAP_H_ +#define _ASM_EARLY_IOREMAP_H_ + +#include <linux/types.h> + +/* + * early_ioremap() and early_iounmap() are for temporary early boot-time + * mappings, before the real ioremap() is functional. + */ +extern void __iomem *early_ioremap(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap(resource_size_t phys_addr, + unsigned long size); +extern void early_iounmap(void __iomem *addr, unsigned long size); +extern void early_memunmap(void *addr, unsigned long size); + +/* + * Weak function called by early_ioremap_reset(). It does nothing, but + * architectures may provide their own version to do any needed cleanups. + */ +extern void early_ioremap_shutdown(void); + +#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU) +/* Arch-specific initialization */ +extern void early_ioremap_init(void); + +/* Generic initialization called by architecture code */ +extern void early_ioremap_setup(void); + +/* + * Called as last step in paging_init() so library can act + * accordingly for subsequent map/unmap requests. 
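+ * A no-op inline stub is provided further down for configurations
+ * without CONFIG_GENERIC_EARLY_IOREMAP or an MMU.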
+ */ +extern void early_ioremap_reset(void); + +#else +static inline void early_ioremap_init(void) { } +static inline void early_ioremap_setup(void) { } +static inline void early_ioremap_reset(void) { } +#endif + +#endif /* _ASM_EARLY_IOREMAP_H_ */ diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h new file mode 100644 index 000000000..0d68a1eae --- /dev/null +++ b/include/asm-generic/emergency-restart.h @@ -0,0 +1,9 @@ +#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H +#define _ASM_GENERIC_EMERGENCY_RESTART_H + +static inline void machine_emergency_restart(void) +{ + machine_restart(NULL); +} + +#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */ diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h new file mode 100644 index 000000000..567766b00 --- /dev/null +++ b/include/asm-generic/exec.h @@ -0,0 +1,19 @@ +/* Generic process execution definitions, based on MN10300 definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_EXEC_H +#define __ASM_GENERIC_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_GENERIC_EXEC_H */ diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h new file mode 100644 index 000000000..fe8ca7fce --- /dev/null +++ b/include/asm-generic/fb.h @@ -0,0 +1,12 @@ +#ifndef __ASM_GENERIC_FB_H_ +#define __ASM_GENERIC_FB_H_ +#include <linux/fb.h> + +#define fb_pgprotect(...) do {} while (0) + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* __ASM_GENERIC_FB_H_ */ diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h new file mode 100644 index 000000000..f23174fb9 --- /dev/null +++ b/include/asm-generic/fixmap.h @@ -0,0 +1,100 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 + * Break out common bits to asm-generic by Mark Salter, November 2013 + */ + +#ifndef __ASM_GENERIC_FIXMAP_H +#define __ASM_GENERIC_FIXMAP_H + +#include <linux/bug.h> + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) + +#ifndef __ASSEMBLY__ +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without translation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +/* + * Provide some reasonable defaults for page flags. 
+ * Not all architectures use all of these different types and some + * architectures use different names. + */ +#ifndef FIXMAP_PAGE_NORMAL +#define FIXMAP_PAGE_NORMAL PAGE_KERNEL +#endif +#ifndef FIXMAP_PAGE_NOCACHE +#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE +#endif +#ifndef FIXMAP_PAGE_IO +#define FIXMAP_PAGE_IO PAGE_KERNEL_IO +#endif +#ifndef FIXMAP_PAGE_CLEAR +#define FIXMAP_PAGE_CLEAR __pgprot(0) +#endif + +#ifndef set_fixmap +#define set_fixmap(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) +#endif + +#ifndef clear_fixmap +#define clear_fixmap(idx) \ + __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR) +#endif + +/* Return a pointer with offset calculated */ +#define __set_fixmap_offset(idx, phys, flags) \ +({ \ + unsigned long addr; \ + __set_fixmap(idx, phys, flags); \ + addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ + addr; \ +}) + +#define set_fixmap_offset(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL) + +/* + * Some hardware wants to get fixmapped without caching. + */ +#define set_fixmap_nocache(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE) + +#define set_fixmap_offset_nocache(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE) + +/* + * Some fixmaps are for IO + */ +#define set_fixmap_io(idx, phys) \ + __set_fixmap(idx, phys, FIXMAP_PAGE_IO) + +#define set_fixmap_offset_io(idx, phys) \ + __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO) + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_GENERIC_FIXMAP_H */ diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h new file mode 100644 index 000000000..51abba9ea --- /dev/null +++ b/include/asm-generic/ftrace.h @@ -0,0 +1,16 @@ +/* + * linux/include/asm-generic/ftrace.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_GENERIC_FTRACE_H__ +#define __ASM_GENERIC_FTRACE_H__ + +/* + * Not all architectures need their own ftrace.h, the most + * common definitions are already in linux/ftrace.h. + */ + +#endif /* __ASM_GENERIC_FTRACE_H__ */ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h new file mode 100644 index 000000000..b59b5a526 --- /dev/null +++ b/include/asm-generic/futex.h @@ -0,0 +1,171 @@ +#ifndef _ASM_GENERIC_FUTEX_H +#define _ASM_GENERIC_FUTEX_H + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> + +#ifndef CONFIG_SMP +/* + * The following implementation only for uniprocessor machines. + * For UP, it's relies on the fact that pagefault_disable() also disables + * preemption to ensure mutual exclusion. + * + */ + +/** + * futex_atomic_op_inuser() - Atomic arithmetic operation with constant + * argument and comparison of the previous + * futex value with another constant. 
+ * + * @encoded_op: encoded operation to execute + * @uaddr: pointer to user space address + * + * Return: + * 0 - On success + * <0 - On error + */ +static inline int +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval, ret; + u32 tmp; + + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + pagefault_disable(); + + ret = -EFAULT; + if (unlikely(get_user(oldval, uaddr) != 0)) + goto out_pagefault_enable; + + ret = 0; + tmp = oldval; + + switch (op) { + case FUTEX_OP_SET: + tmp = oparg; + break; + case FUTEX_OP_ADD: + tmp += oparg; + break; + case FUTEX_OP_OR: + tmp |= oparg; + break; + case FUTEX_OP_ANDN: + tmp &= ~oparg; + break; + case FUTEX_OP_XOR: + tmp ^= oparg; + break; + default: + ret = -ENOSYS; + } + + if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0)) + ret = -EFAULT; + +out_pagefault_enable: + pagefault_enable(); + + if (ret == 0) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +/** + * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the + * uaddr with newval if the current value is + * oldval. + * @uval: pointer to store content of @uaddr + * @uaddr: pointer to user space address + * @oldval: old value + * @newval: new value to store to @uaddr + * + * Return: + * 0 - On success + * <0 - On error + */ +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + u32 val; + + if (unlikely(get_user(val, uaddr) != 0)) + return -EFAULT; + + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) + return -EFAULT; + + *uval = val; + + return 0; +} + +#else +static inline int +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + case FUTEX_OP_ADD: + case FUTEX_OP_OR: + case FUTEX_OP_ANDN: + case FUTEX_OP_XOR: + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + return -ENOSYS; +} + +#endif /* CONFIG_SMP */ +#endif diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h new file mode 100644 index 000000000..65e4468ac --- /dev/null +++ b/include/asm-generic/getorder.h @@ -0,0 +1,61 @@ +#ifndef __ASM_GENERIC_GETORDER_H +#define __ASM_GENERIC_GETORDER_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/log2.h> + +/* + * Runtime evaluation of get_order() + */ +static inline __attribute_const__ +int __get_order(unsigned long size) +{ + int order; + + size--; + size >>= PAGE_SHIFT; +#if BITS_PER_LONG == 32 + order = fls(size); +#else + order = fls64(size); +#endif + return order; +} + +/** + * get_order - Determine the allocation order of a memory size + * @size: The size for which to get the order + * + * Determine the allocation order of a particular sized block of memory. This + * is on a logarithmic scale, where: + * + * 0 -> 2^0 * PAGE_SIZE and below + * 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1 + * 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1 + * 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1 + * 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1 + * ... + * + * The order returned is used to find the smallest allocation granule required + * to hold an object of the specified size. + * + * The result is undefined if the size is 0. + * + * This function may be used to initialise variables with compile time + * evaluations of constants. + */ +#define get_order(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ + (((n) < (1UL << PAGE_SHIFT)) ? 0 : \ + ilog2((n) - 1) - PAGE_SHIFT + 1) \ + ) : \ + __get_order(n) \ +) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GENERIC_GETORDER_H */ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h new file mode 100644 index 000000000..9bb0d1172 --- /dev/null +++ b/include/asm-generic/gpio.h @@ -0,0 +1,172 @@ +#ifndef _ASM_GENERIC_GPIO_H +#define _ASM_GENERIC_GPIO_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/of.h> + +#ifdef CONFIG_GPIOLIB + +#include <linux/compiler.h> +#include <linux/gpio/driver.h> +#include <linux/gpio/consumer.h> + +/* Platforms may implement their GPIO interface with library code, + * at a small performance cost for non-inlined operations and some + * extra memory (for code and for per-GPIO table entries). + * + * While the GPIO programming interface defines valid GPIO numbers + * to be in the range 0..MAX_INT, this library restricts them to the + * smaller range 0..ARCH_NR_GPIOS-1. 
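+ * With the default ARCH_NR_GPIOS of 512, for instance, gpio_is_valid(511)
+ * is true while gpio_is_valid(512) and gpio_is_valid(-1) are not.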
+ * + * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of + * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is + * actually an estimate of a board-specific value. + */ + +#ifndef ARCH_NR_GPIOS +#define ARCH_NR_GPIOS 512 +#endif + +/* + * "valid" GPIO numbers are nonnegative and may be passed to + * setup routines like gpio_request(). only some valid numbers + * can successfully be requested and used. + * + * Invalid GPIO numbers are useful for indicating no-such-GPIO in + * platform data and other tables. + */ + +static inline bool gpio_is_valid(int number) +{ + return number >= 0 && number < ARCH_NR_GPIOS; +} + +struct device; +struct gpio; +struct seq_file; +struct module; +struct device_node; +struct gpio_desc; + +/* caller holds gpio_lock *OR* gpio is marked as requested */ +static inline struct gpio_chip *gpio_to_chip(unsigned gpio) +{ + return gpiod_to_chip(gpio_to_desc(gpio)); +} + +/* Always use the library code for GPIO management calls, + * or when sleeping may be involved. + */ +extern int gpio_request(unsigned gpio, const char *label); +extern void gpio_free(unsigned gpio); + +static inline int gpio_direction_input(unsigned gpio) +{ + return gpiod_direction_input(gpio_to_desc(gpio)); +} +static inline int gpio_direction_output(unsigned gpio, int value) +{ + return gpiod_direction_output_raw(gpio_to_desc(gpio), value); +} + +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return gpiod_set_debounce(gpio_to_desc(gpio), debounce); +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio)); +} +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value); +} + + +/* A platform's <asm/gpio.h> code may want to inline the I/O calls when + * the GPIO is constant and refers to some always-present controller, + * giving direct access to chip registers and tight bitbanging loops. + */ +static inline int __gpio_get_value(unsigned gpio) +{ + return gpiod_get_raw_value(gpio_to_desc(gpio)); +} +static inline void __gpio_set_value(unsigned gpio, int value) +{ + return gpiod_set_raw_value(gpio_to_desc(gpio), value); +} + +static inline int __gpio_cansleep(unsigned gpio) +{ + return gpiod_cansleep(gpio_to_desc(gpio)); +} + +static inline int __gpio_to_irq(unsigned gpio) +{ + return gpiod_to_irq(gpio_to_desc(gpio)); +} + +extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); +extern int gpio_request_array(const struct gpio *array, size_t num); +extern void gpio_free_array(const struct gpio *array, size_t num); + +/* + * A sysfs interface can be exported by individual drivers if they want, + * but more typically is configured entirely from userspace. 
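+ *
+ * Driver-side sketch (GPIO number and label are illustrative):
+ *	gpio_request_one(42, GPIOF_OUT_INIT_LOW, "status-led");
+ *	gpio_export(42, false);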
+ */ +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + return gpiod_export(gpio_to_desc(gpio), direction_may_change); +} + +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + return gpiod_export_link(dev, name, gpio_to_desc(gpio)); +} + +static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) +{ + return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value); +} + +static inline void gpio_unexport(unsigned gpio) +{ + gpiod_unexport(gpio_to_desc(gpio)); +} + +#else /* !CONFIG_GPIOLIB */ + +static inline bool gpio_is_valid(int number) +{ + /* only non-negative numbers are valid */ + return number >= 0; +} + +/* platforms that don't directly support access to GPIOs through I2C, SPI, + * or other blocking infrastructure can use these wrappers. + */ + +static inline int gpio_cansleep(unsigned gpio) +{ + return 0; +} + +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + might_sleep(); + return __gpio_get_value(gpio); +} + +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + might_sleep(); + __gpio_set_value(gpio, value); +} + +#endif /* !CONFIG_GPIOLIB */ + +#endif /* _ASM_GENERIC_GPIO_H */ diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h new file mode 100644 index 000000000..04d0a977c --- /dev/null +++ b/include/asm-generic/hardirq.h @@ -0,0 +1,21 @@ +#ifndef __ASM_GENERIC_HARDIRQ_H +#define __ASM_GENERIC_HARDIRQ_H + +#include <linux/cache.h> +#include <linux/threads.h> + +typedef struct { + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ +#include <linux/irq.h> + +#ifndef ack_bad_irq +static inline void ack_bad_irq(unsigned int irq) +{ + printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); +} +#endif + +#endif /* __ASM_GENERIC_HARDIRQ_H */ diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h new file mode 100644 index 000000000..99b490b4d --- /dev/null +++ b/include/asm-generic/hugetlb.h @@ -0,0 +1,40 @@ +#ifndef _ASM_GENERIC_HUGETLB_H +#define _ASM_GENERIC_HUGETLB_H + +static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + return mk_pte(page, pgprot); +} + +static inline unsigned long huge_pte_write(pte_t pte) +{ + return pte_write(pte); +} + +static inline unsigned long huge_pte_dirty(pte_t pte) +{ + return pte_dirty(pte); +} + +static inline pte_t huge_pte_mkwrite(pte_t pte) +{ + return pte_mkwrite(pte); +} + +static inline pte_t huge_pte_mkdirty(pte_t pte) +{ + return pte_mkdirty(pte); +} + +static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + return pte_modify(pte, newprot); +} + +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_clear(mm, addr, ptep); +} + +#endif /* _ASM_GENERIC_HUGETLB_H */ diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h new file mode 100644 index 000000000..89036d7b4 --- /dev/null +++ b/include/asm-generic/hw_irq.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_HW_IRQ_H +#define __ASM_GENERIC_HW_IRQ_H +/* + * hw_irq.h has internal declarations for the low-level interrupt + * controller, like the original i8259A. + * In general, this is not needed for new architectures. 
+ */ + +#endif /* __ASM_GENERIC_HW_IRQ_H */ diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h new file mode 100644 index 000000000..1b91d0681 --- /dev/null +++ b/include/asm-generic/ide_iops.h @@ -0,0 +1,38 @@ +/* Generic I/O and MEMIO string operations. */ + +#define __ide_insw insw +#define __ide_insl insl +#define __ide_outsw outsw +#define __ide_outsl outsl + +static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + *(u16 *)addr = readw(port); + addr += 2; + } +} + +static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + *(u32 *)addr = readl(port); + addr += 4; + } +} + +static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) +{ + while (count--) { + writew(*(u16 *)addr, port); + addr += 2; + } +} + +static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) +{ + while (count--) { + writel(*(u32 *)addr, port); + addr += 4; + } +} diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h new file mode 100644 index 000000000..4cd84855c --- /dev/null +++ b/include/asm-generic/int-ll64.h @@ -0,0 +1,49 @@ +/* + * asm-generic/int-ll64.h + * + * Integer declarations for architectures which use "long long" + * for 64-bit types. + */ +#ifndef _ASM_GENERIC_INT_LL64_H +#define _ASM_GENERIC_INT_LL64_H + +#include <uapi/asm-generic/int-ll64.h> + + +#ifndef __ASSEMBLY__ + +typedef signed char s8; +typedef unsigned char u8; + +typedef signed short s16; +typedef unsigned short u16; + +typedef signed int s32; +typedef unsigned int u32; + +typedef signed long long s64; +typedef unsigned long long u64; + +#define S8_C(x) x +#define U8_C(x) x ## U +#define S16_C(x) x +#define U16_C(x) x ## U +#define S32_C(x) x +#define U32_C(x) x ## U +#define S64_C(x) x ## LL +#define U64_C(x) x ## ULL + +#else /* __ASSEMBLY__ */ + +#define S8_C(x) x +#define U8_C(x) x +#define S16_C(x) x +#define U16_C(x) x +#define S32_C(x) x +#define U32_C(x) x +#define S64_C(x) x +#define U64_C(x) x + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_GENERIC_INT_LL64_H */ diff --git a/include/asm-generic/io-64-nonatomic-hi-lo.h b/include/asm-generic/io-64-nonatomic-hi-lo.h new file mode 100644 index 000000000..2e29d13fc --- /dev/null +++ b/include/asm-generic/io-64-nonatomic-hi-lo.h @@ -0,0 +1,32 @@ +#ifndef _ASM_IO_64_NONATOMIC_HI_LO_H_ +#define _ASM_IO_64_NONATOMIC_HI_LO_H_ + +#include <linux/io.h> +#include <asm-generic/int-ll64.h> + +static inline __u64 hi_lo_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl(p + 1); + low = readl(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val >> 32, addr + 4); + writel(val, addr); +} + +#ifndef readq +#define readq hi_lo_readq +#endif + +#ifndef writeq +#define writeq hi_lo_writeq +#endif + +#endif /* _ASM_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/asm-generic/io-64-nonatomic-lo-hi.h b/include/asm-generic/io-64-nonatomic-lo-hi.h new file mode 100644 index 000000000..0efacff0a --- /dev/null +++ b/include/asm-generic/io-64-nonatomic-lo-hi.h @@ -0,0 +1,32 @@ +#ifndef _ASM_IO_64_NONATOMIC_LO_HI_H_ +#define _ASM_IO_64_NONATOMIC_LO_HI_H_ + +#include <linux/io.h> +#include <asm-generic/int-ll64.h> + +static inline __u64 lo_hi_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p 
+ 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} + +#ifndef readq +#define readq lo_hi_readq +#endif + +#ifndef writeq +#define writeq lo_hi_writeq +#endif + +#endif /* _ASM_IO_64_NONATOMIC_LO_HI_H_ */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h new file mode 100644 index 000000000..9db042304 --- /dev/null +++ b/include/asm-generic/io.h @@ -0,0 +1,877 @@ +/* Generic I/O port emulation, based on MN10300 code + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_IO_H +#define __ASM_GENERIC_IO_H + +#include <asm/page.h> /* I/O is all done through memory accesses */ +#include <linux/string.h> /* for memset() and memcpy() */ +#include <linux/types.h> + +#ifdef CONFIG_GENERIC_IOMAP +#include <asm-generic/iomap.h> +#endif + +#include <asm-generic/pci_iomap.h> + +#ifndef mmiowb +#define mmiowb() do {} while (0) +#endif + +/* + * __raw_{read,write}{b,w,l,q}() access memory in native endianness. + * + * On some architectures memory mapped IO needs to be accessed differently. + * On the simple architectures, we just read/write the memory location + * directly. + */ + +#ifndef __raw_readb +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + return *(const volatile u8 __force *)addr; +} +#endif + +#ifndef __raw_readw +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + return *(const volatile u16 __force *)addr; +} +#endif + +#ifndef __raw_readl +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + return *(const volatile u32 __force *)addr; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef __raw_readq +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + return *(const volatile u64 __force *)addr; +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef __raw_writeb +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 value, volatile void __iomem *addr) +{ + *(volatile u8 __force *)addr = value; +} +#endif + +#ifndef __raw_writew +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 value, volatile void __iomem *addr) +{ + *(volatile u16 __force *)addr = value; +} +#endif + +#ifndef __raw_writel +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 value, volatile void __iomem *addr) +{ + *(volatile u32 __force *)addr = value; +} +#endif + +#ifdef CONFIG_64BIT +#ifndef __raw_writeq +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 value, volatile void __iomem *addr) +{ + *(volatile u64 __force *)addr = value; +} +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}() access little endian memory and return result in + * native endianness. 
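+ *
+ * For example, a driver reading a little-endian 32-bit device register
+ * (names illustrative) simply does:
+ *	u32 status = readl(base + REG_STATUS);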
+ */ + +#ifndef readb +#define readb readb +static inline u8 readb(const volatile void __iomem *addr) +{ + return __raw_readb(addr); +} +#endif + +#ifndef readw +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} +#endif + +#ifndef readl +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) +{ + return __le32_to_cpu(__raw_readl(addr)); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef readq +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return __le64_to_cpu(__raw_readq(addr)); +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writeb +#define writeb writeb +static inline void writeb(u8 value, volatile void __iomem *addr) +{ + __raw_writeb(value, addr); +} +#endif + +#ifndef writew +#define writew writew +static inline void writew(u16 value, volatile void __iomem *addr) +{ + __raw_writew(cpu_to_le16(value), addr); +} +#endif + +#ifndef writel +#define writel writel +static inline void writel(u32 value, volatile void __iomem *addr) +{ + __raw_writel(__cpu_to_le32(value), addr); +} +#endif + +#ifdef CONFIG_64BIT +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 value, volatile void __iomem *addr) +{ + __raw_writeq(__cpu_to_le64(value), addr); +} +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but + * are not guaranteed to provide ordering against spinlocks or memory + * accesses. + */ +#ifndef readb_relaxed +#define readb_relaxed readb +#endif + +#ifndef readw_relaxed +#define readw_relaxed readw +#endif + +#ifndef readl_relaxed +#define readl_relaxed readl +#endif + +#ifndef readq_relaxed +#define readq_relaxed readq +#endif + +#ifndef writeb_relaxed +#define writeb_relaxed writeb +#endif + +#ifndef writew_relaxed +#define writew_relaxed writew +#endif + +#ifndef writel_relaxed +#define writel_relaxed writel +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed writeq +#endif + +/* + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). 
+ */ +#ifndef readsb +#define readsb readsb +static inline void readsb(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u8 *buf = buffer; + + do { + u8 x = __raw_readb(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifndef readsw +#define readsw readsw +static inline void readsw(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u16 *buf = buffer; + + do { + u16 x = __raw_readw(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifndef readsl +#define readsl readsl +static inline void readsl(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u32 *buf = buffer; + + do { + u32 x = __raw_readl(addr); + *buf++ = x; + } while (--count); + } +} +#endif + +#ifdef CONFIG_64BIT +#ifndef readsq +#define readsq readsq +static inline void readsq(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u64 *buf = buffer; + + do { + u64 x = __raw_readq(addr); + *buf++ = x; + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writesb +#define writesb writesb +static inline void writesb(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u8 *buf = buffer; + + do { + __raw_writeb(*buf++, addr); + } while (--count); + } +} +#endif + +#ifndef writesw +#define writesw writesw +static inline void writesw(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u16 *buf = buffer; + + do { + __raw_writew(*buf++, addr); + } while (--count); + } +} +#endif + +#ifndef writesl +#define writesl writesl +static inline void writesl(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u32 *buf = buffer; + + do { + __raw_writel(*buf++, addr); + } while (--count); + } +} +#endif + +#ifdef CONFIG_64BIT +#ifndef writesq +#define writesq writesq +static inline void writesq(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u64 *buf = buffer; + + do { + __raw_writeq(*buf++, addr); + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef PCI_IOBASE +#define PCI_IOBASE ((void __iomem *)0) +#endif + +#ifndef IO_SPACE_LIMIT +#define IO_SPACE_LIMIT 0xffff +#endif + +/* + * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be + * implemented on hardware that needs an additional delay for I/O accesses to + * take effect. 
+ */ + +#ifndef inb +#define inb inb +static inline u8 inb(unsigned long addr) +{ + return readb(PCI_IOBASE + addr); +} +#endif + +#ifndef inw +#define inw inw +static inline u16 inw(unsigned long addr) +{ + return readw(PCI_IOBASE + addr); +} +#endif + +#ifndef inl +#define inl inl +static inline u32 inl(unsigned long addr) +{ + return readl(PCI_IOBASE + addr); +} +#endif + +#ifndef outb +#define outb outb +static inline void outb(u8 value, unsigned long addr) +{ + writeb(value, PCI_IOBASE + addr); +} +#endif + +#ifndef outw +#define outw outw +static inline void outw(u16 value, unsigned long addr) +{ + writew(value, PCI_IOBASE + addr); +} +#endif + +#ifndef outl +#define outl outl +static inline void outl(u32 value, unsigned long addr) +{ + writel(value, PCI_IOBASE + addr); +} +#endif + +#ifndef inb_p +#define inb_p inb_p +static inline u8 inb_p(unsigned long addr) +{ + return inb(addr); +} +#endif + +#ifndef inw_p +#define inw_p inw_p +static inline u16 inw_p(unsigned long addr) +{ + return inw(addr); +} +#endif + +#ifndef inl_p +#define inl_p inl_p +static inline u32 inl_p(unsigned long addr) +{ + return inl(addr); +} +#endif + +#ifndef outb_p +#define outb_p outb_p +static inline void outb_p(u8 value, unsigned long addr) +{ + outb(value, addr); +} +#endif + +#ifndef outw_p +#define outw_p outw_p +static inline void outw_p(u16 value, unsigned long addr) +{ + outw(value, addr); +} +#endif + +#ifndef outl_p +#define outl_p outl_p +static inline void outl_p(u32 value, unsigned long addr) +{ + outl(value, addr); +} +#endif + +/* + * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a + * single I/O port multiple times. + */ + +#ifndef insb +#define insb insb +static inline void insb(unsigned long addr, void *buffer, unsigned int count) +{ + readsb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insw +#define insw insw +static inline void insw(unsigned long addr, void *buffer, unsigned int count) +{ + readsw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insl +#define insl insl +static inline void insl(unsigned long addr, void *buffer, unsigned int count) +{ + readsl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsb +#define outsb outsb +static inline void outsb(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsw +#define outsw outsw +static inline void outsw(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsl +#define outsl outsl +static inline void outsl(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insb_p +#define insb_p insb_p +static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) +{ + insb(addr, buffer, count); +} +#endif + +#ifndef insw_p +#define insw_p insw_p +static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) +{ + insw(addr, buffer, count); +} +#endif + +#ifndef insl_p +#define insl_p insl_p +static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) +{ + insl(addr, buffer, count); +} +#endif + +#ifndef outsb_p +#define outsb_p outsb_p +static inline void outsb_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsb(addr, buffer, count); +} +#endif + +#ifndef outsw_p +#define outsw_p outsw_p +static inline void outsw_p(unsigned long addr, const void *buffer, + unsigned int count) 
+{ + outsw(addr, buffer, count); +} +#endif + +#ifndef outsl_p +#define outsl_p outsl_p +static inline void outsl_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsl(addr, buffer, count); +} +#endif + +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioread8 +#define ioread8 ioread8 +static inline u8 ioread8(const volatile void __iomem *addr) +{ + return readb(addr); +} +#endif + +#ifndef ioread16 +#define ioread16 ioread16 +static inline u16 ioread16(const volatile void __iomem *addr) +{ + return readw(addr); +} +#endif + +#ifndef ioread32 +#define ioread32 ioread32 +static inline u32 ioread32(const volatile void __iomem *addr) +{ + return readl(addr); +} +#endif + +#ifndef iowrite8 +#define iowrite8 iowrite8 +static inline void iowrite8(u8 value, volatile void __iomem *addr) +{ + writeb(value, addr); +} +#endif + +#ifndef iowrite16 +#define iowrite16 iowrite16 +static inline void iowrite16(u16 value, volatile void __iomem *addr) +{ + writew(value, addr); +} +#endif + +#ifndef iowrite32 +#define iowrite32 iowrite32 +static inline void iowrite32(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} +#endif + +#ifndef ioread16be +#define ioread16be ioread16be +static inline u16 ioread16be(const volatile void __iomem *addr) +{ + return __be16_to_cpu(__raw_readw(addr)); +} +#endif + +#ifndef ioread32be +#define ioread32be ioread32be +static inline u32 ioread32be(const volatile void __iomem *addr) +{ + return __be32_to_cpu(__raw_readl(addr)); +} +#endif + +#ifndef iowrite16be +#define iowrite16be iowrite16be +static inline void iowrite16be(u16 value, void volatile __iomem *addr) +{ + __raw_writew(__cpu_to_be16(value), addr); +} +#endif + +#ifndef iowrite32be +#define iowrite32be iowrite32be +static inline void iowrite32be(u32 value, volatile void __iomem *addr) +{ + __raw_writel(__cpu_to_be32(value), addr); +} +#endif + +#ifndef ioread8_rep +#define ioread8_rep ioread8_rep +static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + readsb(addr, buffer, count); +} +#endif + +#ifndef ioread16_rep +#define ioread16_rep ioread16_rep +static inline void ioread16_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsw(addr, buffer, count); +} +#endif + +#ifndef ioread32_rep +#define ioread32_rep ioread32_rep +static inline void ioread32_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsl(addr, buffer, count); +} +#endif + +#ifndef iowrite8_rep +#define iowrite8_rep iowrite8_rep +static inline void iowrite8_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesb(addr, buffer, count); +} +#endif + +#ifndef iowrite16_rep +#define iowrite16_rep iowrite16_rep +static inline void iowrite16_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesw(addr, buffer, count); +} +#endif + +#ifndef iowrite32_rep +#define iowrite32_rep iowrite32_rep +static inline void iowrite32_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesl(addr, buffer, count); +} +#endif +#endif /* CONFIG_GENERIC_IOMAP */ + +#ifdef __KERNEL__ + +#include <linux/vmalloc.h> +#define __io_virt(x) ((void __force *)(x)) + +#ifndef CONFIG_GENERIC_IOMAP +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); + +#ifndef pci_iounmap +#define pci_iounmap pci_iounmap +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ +} +#endif +#endif /* 
CONFIG_GENERIC_IOMAP */ + +/* + * Change virtual addresses to physical addresses and vv. + * These are pretty trivial + */ +#ifndef virt_to_phys +#define virt_to_phys virt_to_phys +static inline unsigned long virt_to_phys(volatile void *address) +{ + return __pa((unsigned long)address); +} +#endif + +#ifndef phys_to_virt +#define phys_to_virt phys_to_virt +static inline void *phys_to_virt(unsigned long address) +{ + return __va(address); +} +#endif + +/* + * Change "struct page" to physical address. + * + * This implementation is for the no-MMU case only... if you have an MMU + * you'll need to provide your own definitions. + */ + +#ifndef CONFIG_MMU +#ifndef ioremap +#define ioremap ioremap +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + return (void __iomem *)(unsigned long)offset; +} +#endif + +#ifndef __ioremap +#define __ioremap __ioremap +static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, + unsigned long flags) +{ + return ioremap(offset, size); +} +#endif + +#ifndef ioremap_nocache +#define ioremap_nocache ioremap_nocache +static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) +{ + return ioremap(offset, size); +} +#endif + +#ifndef ioremap_wc +#define ioremap_wc ioremap_wc +static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} +#endif + +#ifndef iounmap +#define iounmap iounmap +static inline void iounmap(void __iomem *addr) +{ +} +#endif +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_HAS_IOPORT_MAP +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioport_map +#define ioport_map ioport_map +static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return PCI_IOBASE + (port & IO_SPACE_LIMIT); +} +#endif + +#ifndef ioport_unmap +#define ioport_unmap ioport_unmap +static inline void ioport_unmap(void __iomem *p) +{ +} +#endif +#else /* CONFIG_GENERIC_IOMAP */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *p); +#endif /* CONFIG_GENERIC_IOMAP */ +#endif /* CONFIG_HAS_IOPORT_MAP */ + +#ifndef xlate_dev_kmem_ptr +#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr +static inline void *xlate_dev_kmem_ptr(void *addr) +{ + return addr; +} +#endif + +#ifndef xlate_dev_mem_ptr +#define xlate_dev_mem_ptr xlate_dev_mem_ptr +static inline void *xlate_dev_mem_ptr(phys_addr_t addr) +{ + return __va(addr); +} +#endif + +#ifndef unxlate_dev_mem_ptr +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) +{ +} +#endif + +#ifdef CONFIG_VIRT_TO_BUS +#ifndef virt_to_bus +static inline unsigned long virt_to_bus(void *address) +{ + return (unsigned long)address; +} + +static inline void *bus_to_virt(unsigned long address) +{ + return (void *)address; +} +#endif +#endif + +#ifndef memset_io +#define memset_io memset_io +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset(__io_virt(addr), value, size); +} +#endif + +#ifndef memcpy_fromio +#define memcpy_fromio memcpy_fromio +static inline void memcpy_fromio(void *buffer, + const volatile void __iomem *addr, + size_t size) +{ + memcpy(buffer, __io_virt(addr), size); +} +#endif + +#ifndef memcpy_toio +#define memcpy_toio memcpy_toio +static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, + size_t size) +{ + memcpy(__io_virt(addr), buffer, size); +} +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ASM_GENERIC_IO_H */ diff --git 
a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h new file mode 100644 index 000000000..297fb0d7c --- /dev/null +++ b/include/asm-generic/ioctl.h @@ -0,0 +1,17 @@ +#ifndef _ASM_GENERIC_IOCTL_H +#define _ASM_GENERIC_IOCTL_H + +#include <uapi/asm-generic/ioctl.h> + +#ifdef __CHECKER__ +#define _IOC_TYPECHECK(t) (sizeof(t)) +#else +/* provoke compile error for invalid uses of size argument */ +extern unsigned int __invalid_size_argument_for_IOC; +#define _IOC_TYPECHECK(t) \ + ((sizeof(t) == sizeof(t[1]) && \ + sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ + sizeof(t) : __invalid_size_argument_for_IOC) +#endif + +#endif /* _ASM_GENERIC_IOCTL_H */ diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h new file mode 100644 index 000000000..1b4101164 --- /dev/null +++ b/include/asm-generic/iomap.h @@ -0,0 +1,81 @@ +#ifndef __GENERIC_IO_H +#define __GENERIC_IO_H + +#include <linux/linkage.h> +#include <asm/byteorder.h> + +/* + * These are the "generic" interfaces for doing new-style + * memory-mapped or PIO accesses. Architectures may do + * their own arch-optimized versions, these just act as + * wrappers around the old-style IO register access functions: + * read[bwl]/write[bwl]/in[bwl]/out[bwl] + * + * Don't include this directly, include it from <asm/io.h>. + */ + +/* + * Read/write from/to an (offsettable) iomem cookie. It might be a PIO + * access or a MMIO access, these functions don't care. The info is + * encoded in the hardware mapping set up by the mapping functions + * (or the cookie itself, depending on implementation and hw). + * + * The generic routines just encode the PIO/MMIO as part of the + * cookie, and coldly assume that the MMIO IO mappings are not + * in the low address range. Architectures for which this is not + * true can't use this generic implementation. + */ +extern unsigned int ioread8(void __iomem *); +extern unsigned int ioread16(void __iomem *); +extern unsigned int ioread16be(void __iomem *); +extern unsigned int ioread32(void __iomem *); +extern unsigned int ioread32be(void __iomem *); + +extern void iowrite8(u8, void __iomem *); +extern void iowrite16(u16, void __iomem *); +extern void iowrite16be(u16, void __iomem *); +extern void iowrite32(u32, void __iomem *); +extern void iowrite32be(u32, void __iomem *); + +/* + * "string" versions of the above. Note that they + * use native byte ordering for the accesses (on + * the assumption that IO and memory agree on a + * byte order, and CPU byteorder is irrelevant). + * + * They do _not_ update the port address. If you + * want MMIO that copies stuff laid out in MMIO + * memory across multiple ports, use "memcpy_toio()" + * and friends. 
+ */ +extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); + +extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); + +#ifdef CONFIG_HAS_IOPORT_MAP +/* Create a virtual mapping cookie for an IO port range */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); +#endif + +#ifndef ARCH_HAS_IOREMAP_WC +#define ioremap_wc ioremap_nocache +#endif + +#ifdef CONFIG_PCI +/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#elif defined(CONFIG_GENERIC_IOMAP) +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } +#endif + +#include <asm-generic/pci_iomap.h> + +#endif diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h new file mode 100644 index 000000000..b90ec0bc4 --- /dev/null +++ b/include/asm-generic/irq.h @@ -0,0 +1,18 @@ +#ifndef __ASM_GENERIC_IRQ_H +#define __ASM_GENERIC_IRQ_H + +/* + * NR_IRQS is the upper bound of how many interrupts can be handled + * in the platform. It is used to size the static irq_map array, + * so don't make it too big. + */ +#ifndef NR_IRQS +#define NR_IRQS 64 +#endif + +static inline int irq_canonicalize(int irq) +{ + return irq; +} + +#endif /* __ASM_GENERIC_IRQ_H */ diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h new file mode 100644 index 000000000..6bf9355fa --- /dev/null +++ b/include/asm-generic/irq_regs.h @@ -0,0 +1,37 @@ +/* Fallback per-CPU frame pointer holder + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _ASM_GENERIC_IRQ_REGS_H +#define _ASM_GENERIC_IRQ_REGS_H + +#include <linux/percpu.h> + +/* + * Per-cpu current frame pointer - the location of the last exception frame on + * the stack + */ +DECLARE_PER_CPU(struct pt_regs *, __irq_regs); + +static inline struct pt_regs *get_irq_regs(void) +{ + return __this_cpu_read(__irq_regs); +} + +static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) +{ + struct pt_regs *old_regs; + + old_regs = __this_cpu_read(__irq_regs); + __this_cpu_write(__irq_regs, new_regs); + return old_regs; +} + +#endif /* _ASM_GENERIC_IRQ_REGS_H */ diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h new file mode 100644 index 000000000..a44f452c6 --- /dev/null +++ b/include/asm-generic/irq_work.h @@ -0,0 +1,10 @@ +#ifndef __ASM_IRQ_WORK_H +#define __ASM_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return false; +} + +#endif /* __ASM_IRQ_WORK_H */ + diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h new file mode 100644 index 000000000..1f40d0024 --- /dev/null +++ b/include/asm-generic/irqflags.h @@ -0,0 +1,66 @@ +#ifndef __ASM_GENERIC_IRQFLAGS_H +#define __ASM_GENERIC_IRQFLAGS_H + +/* + * All architectures should implement at least the first two functions, + * usually inline assembly will be the best way. + */ +#ifndef ARCH_IRQ_DISABLED +#define ARCH_IRQ_DISABLED 0 +#define ARCH_IRQ_ENABLED 1 +#endif + +/* read interrupt enabled status */ +#ifndef arch_local_save_flags +unsigned long arch_local_save_flags(void); +#endif + +/* set interrupt enabled status */ +#ifndef arch_local_irq_restore +void arch_local_irq_restore(unsigned long flags); +#endif + +/* get status and disable interrupts */ +#ifndef arch_local_irq_save +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + flags = arch_local_save_flags(); + arch_local_irq_restore(ARCH_IRQ_DISABLED); + return flags; +} +#endif + +/* test flags */ +#ifndef arch_irqs_disabled_flags +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return flags == ARCH_IRQ_DISABLED; +} +#endif + +/* unconditionally enable interrupts */ +#ifndef arch_local_irq_enable +static inline void arch_local_irq_enable(void) +{ + arch_local_irq_restore(ARCH_IRQ_ENABLED); +} +#endif + +/* unconditionally disable interrupts */ +#ifndef arch_local_irq_disable +static inline void arch_local_irq_disable(void) +{ + arch_local_irq_restore(ARCH_IRQ_DISABLED); +} +#endif + +/* test hardware interrupt enable bit */ +#ifndef arch_irqs_disabled +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} +#endif + +#endif /* __ASM_GENERIC_IRQFLAGS_H */ diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h new file mode 100644 index 000000000..d1814497b --- /dev/null +++ b/include/asm-generic/kdebug.h @@ -0,0 +1,9 @@ +#ifndef _ASM_GENERIC_KDEBUG_H +#define _ASM_GENERIC_KDEBUG_H + +enum die_val { + DIE_UNUSED, + DIE_OOPS = 1, +}; + +#endif /* _ASM_GENERIC_KDEBUG_H */ diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h new file mode 100644 index 000000000..90f99c74d --- /dev/null +++ b/include/asm-generic/kmap_types.h @@ -0,0 +1,10 @@ +#ifndef _ASM_GENERIC_KMAP_TYPES_H +#define _ASM_GENERIC_KMAP_TYPES_H + +#ifdef __WITH_KM_FENCE +# define KM_TYPE_NR 41 +#else +# define KM_TYPE_NR 20 +#endif + +#endif diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h new file mode 100644 index 000000000..fa25becbd 
--- /dev/null +++ b/include/asm-generic/kvm_para.h @@ -0,0 +1,26 @@ +#ifndef _ASM_GENERIC_KVM_PARA_H +#define _ASM_GENERIC_KVM_PARA_H + +#include <uapi/asm-generic/kvm_para.h> + + +/* + * This function is used by architectures that support kvm to avoid issuing + * false soft lockup messages. + */ +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline bool kvm_para_available(void) +{ + return false; +} + +#endif diff --git a/include/asm-generic/libata-portmap.h b/include/asm-generic/libata-portmap.h new file mode 100644 index 000000000..cf14f2ff4 --- /dev/null +++ b/include/asm-generic/libata-portmap.h @@ -0,0 +1,7 @@ +#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H +#define __ASM_GENERIC_LIBATA_PORTMAP_H + +#define ATA_PRIMARY_IRQ(dev) 14 +#define ATA_SECONDARY_IRQ(dev) 15 + +#endif diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h new file mode 100644 index 000000000..fef7a01e5 --- /dev/null +++ b/include/asm-generic/linkage.h @@ -0,0 +1,8 @@ +#ifndef __ASM_GENERIC_LINKAGE_H +#define __ASM_GENERIC_LINKAGE_H +/* + * linux/linkage.h provides reasonable defaults. + * an architecture can override them by providing its own version. + */ + +#endif /* __ASM_GENERIC_LINKAGE_H */ diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h new file mode 100644 index 000000000..9ceb03b4f --- /dev/null +++ b/include/asm-generic/local.h @@ -0,0 +1,55 @@ +#ifndef _ASM_GENERIC_LOCAL_H +#define _ASM_GENERIC_LOCAL_H + +#include <linux/percpu.h> +#include <linux/atomic.h> +#include <asm/types.h> + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. + * + * This is the default implementation, which uses atomic_long_t. Which is + * rather pointless. The whole point behind local_t is that some processors + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs + * running on this CPU. local_t allows exploitation of such capabilities. + */ + +/* Implement in terms of atomics. */ + +/* Don't use typedef: don't want them to be mixed with atomic_t's. */ +typedef struct +{ + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l,i) atomic_long_set((&(l)->a),(i)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) +#define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) + +#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) +#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) +#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) +#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) +#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) +#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) +#define local_inc_return(l) atomic_long_inc_return(&(l)->a) + +#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) +#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) +#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) +#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. 
*/ +#define __local_inc(l) local_set((l), local_read(l) + 1) +#define __local_dec(l) local_set((l), local_read(l) - 1) +#define __local_add(i,l) local_set((l), local_read(l) + (i)) +#define __local_sub(i,l) local_set((l), local_read(l) - (i)) + +#endif /* _ASM_GENERIC_LOCAL_H */ diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h new file mode 100644 index 000000000..5980002b8 --- /dev/null +++ b/include/asm-generic/local64.h @@ -0,0 +1,96 @@ +#ifndef _ASM_GENERIC_LOCAL64_H +#define _ASM_GENERIC_LOCAL64_H + +#include <linux/percpu.h> +#include <asm/types.h> + +/* + * A signed long type for operations which are atomic for a single CPU. + * Usually used in combination with per-cpu variables. + * + * This is the default implementation, which uses atomic64_t. Which is + * rather pointless. The whole point behind local64_t is that some processors + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs + * running on this CPU. local64_t allows exploitation of such capabilities. + */ + +/* Implement in terms of atomics. */ + +#if BITS_PER_LONG == 64 + +#include <asm/local.h> + +typedef struct { + local_t a; +} local64_t; + +#define LOCAL64_INIT(i) { LOCAL_INIT(i) } + +#define local64_read(l) local_read(&(l)->a) +#define local64_set(l,i) local_set((&(l)->a),(i)) +#define local64_inc(l) local_inc(&(l)->a) +#define local64_dec(l) local_dec(&(l)->a) +#define local64_add(i,l) local_add((i),(&(l)->a)) +#define local64_sub(i,l) local_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) local_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) local_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) local_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) local_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) local_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#else /* BITS_PER_LONG != 64 */ + +#include <linux/atomic.h> + +/* Don't use typedef: don't want them to be mixed with atomic_t's. 
*/ +typedef struct { + atomic64_t a; +} local64_t; + +#define LOCAL64_INIT(i) { ATOMIC_LONG_INIT(i) } + +#define local64_read(l) atomic64_read(&(l)->a) +#define local64_set(l,i) atomic64_set((&(l)->a),(i)) +#define local64_inc(l) atomic64_inc(&(l)->a) +#define local64_dec(l) atomic64_dec(&(l)->a) +#define local64_add(i,l) atomic64_add((i),(&(l)->a)) +#define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) + +#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) +#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) +#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) +#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) +#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) +#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) +#define local64_inc_return(l) atomic64_inc_return(&(l)->a) + +#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) +#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) +#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) +#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) + +/* Non-atomic variants, ie. preemption disabled and won't be touched + * in interrupt, etc. Some archs can optimize this case well. */ +#define __local64_inc(l) local64_set((l), local64_read(l) + 1) +#define __local64_dec(l) local64_set((l), local64_read(l) - 1) +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) + +#endif /* BITS_PER_LONG != 64 */ + +#endif /* _ASM_GENERIC_LOCAL64_H */ diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h new file mode 100644 index 000000000..10cd4ffc6 --- /dev/null +++ b/include/asm-generic/mcs_spinlock.h @@ -0,0 +1,13 @@ +#ifndef __ASM_MCS_SPINLOCK_H +#define __ASM_MCS_SPINLOCK_H + +/* + * Architectures can define their own: + * + * arch_mcs_spin_lock_contended(l) + * arch_mcs_spin_unlock_contended(l) + * + * See kernel/locking/mcs_spinlock.c. + */ + +#endif /* __ASM_MCS_SPINLOCK_H */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h new file mode 100644 index 000000000..14909b0b9 --- /dev/null +++ b/include/asm-generic/memory_model.h @@ -0,0 +1,77 @@ +#ifndef __ASM_MEMORY_MODEL_H +#define __ASM_MEMORY_MODEL_H + +#ifndef __ASSEMBLY__ + +#if defined(CONFIG_FLATMEM) + +#ifndef ARCH_PFN_OFFSET +#define ARCH_PFN_OFFSET (0UL) +#endif + +#elif defined(CONFIG_DISCONTIGMEM) + +#ifndef arch_pfn_to_nid +#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) +#endif + +#ifndef arch_local_page_offset +#define arch_local_page_offset(pfn, nid) \ + ((pfn) - NODE_DATA(nid)->node_start_pfn) +#endif + +#endif /* CONFIG_DISCONTIGMEM */ + +/* + * supports 3 memory models. + */ +#if defined(CONFIG_FLATMEM) + +#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) +#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ + ARCH_PFN_OFFSET) +#elif defined(CONFIG_DISCONTIGMEM) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + unsigned long __nid = arch_pfn_to_nid(__pfn); \ + NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ +}) + +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ + (unsigned long)(__pg - __pgdat->node_mem_map) + \ + __pgdat->node_start_pfn; \ +}) + +#elif defined(CONFIG_SPARSEMEM_VMEMMAP) + +/* memmap is virtually contiguous. 
*/ +#define __pfn_to_page(pfn) (vmemmap + (pfn)) +#define __page_to_pfn(page) (unsigned long)((page) - vmemmap) + +#elif defined(CONFIG_SPARSEMEM) +/* + * Note: section's mem_map is encoded to reflect its start_pfn. + * section[i].section_mem_map == mem_map's address - start_pfn; + */ +#define __page_to_pfn(pg) \ +({ const struct page *__pg = (pg); \ + int __sec = page_to_section(__pg); \ + (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ +}) + +#define __pfn_to_page(pfn) \ +({ unsigned long __pfn = (pfn); \ + struct mem_section *__sec = __pfn_to_section(__pfn); \ + __section_mem_map_addr(__sec) + __pfn; \ +}) +#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ + +#define page_to_pfn __page_to_pfn +#define pfn_to_page __pfn_to_page + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h new file mode 100644 index 000000000..866aa461e --- /dev/null +++ b/include/asm-generic/mm_hooks.h @@ -0,0 +1,29 @@ +/* + * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap + * and arch_unmap to be included in asm-FOO/mmu_context.h for any + * arch FOO which doesn't need to hook these. + */ +#ifndef _ASM_GENERIC_MM_HOOKS_H +#define _ASM_GENERIC_MM_HOOKS_H + +static inline void arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +#endif /* _ASM_GENERIC_MM_HOOKS_H */ diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h new file mode 100644 index 000000000..0ed3f1cfb --- /dev/null +++ b/include/asm-generic/mmu.h @@ -0,0 +1,19 @@ +#ifndef __ASM_GENERIC_MMU_H +#define __ASM_GENERIC_MMU_H + +/* + * This is the mmu.h header for nommu implementations. + * Architectures with an MMU need something more complex. + */ +#ifndef __ASSEMBLY__ +typedef struct { + unsigned long end_brk; + +#ifdef CONFIG_BINFMT_ELF_FDPIC + unsigned long exec_fdpic_loadmap; + unsigned long interp_fdpic_loadmap; +#endif +} mm_context_t; +#endif + +#endif /* __ASM_GENERIC_MMU_H */ diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h new file mode 100644 index 000000000..a7eec910b --- /dev/null +++ b/include/asm-generic/mmu_context.h @@ -0,0 +1,45 @@ +#ifndef __ASM_GENERIC_MMU_CONTEXT_H +#define __ASM_GENERIC_MMU_CONTEXT_H + +/* + * Generic hooks for NOMMU architectures, which do not need to do + * anything special here. 
+ */ + +#include <asm-generic/mm_hooks.h> + +struct task_struct; +struct mm_struct; + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ +} + +static inline void deactivate_mm(struct task_struct *task, + struct mm_struct *mm) +{ +} + +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, + struct task_struct *tsk) +{ +} + +static inline void activate_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm) +{ +} + +#endif /* __ASM_GENERIC_MMU_CONTEXT_H */ diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h new file mode 100644 index 000000000..14dc41d18 --- /dev/null +++ b/include/asm-generic/module.h @@ -0,0 +1,48 @@ +#ifndef __ASM_GENERIC_MODULE_H +#define __ASM_GENERIC_MODULE_H + +/* + * Many architectures just need a simple module + * loader without arch specific data. + */ +#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC +struct mod_arch_specific +{ +}; +#endif + +#ifdef CONFIG_64BIT +#define Elf_Shdr Elf64_Shdr +#define Elf_Phdr Elf64_Phdr +#define Elf_Sym Elf64_Sym +#define Elf_Dyn Elf64_Dyn +#define Elf_Ehdr Elf64_Ehdr +#define Elf_Addr Elf64_Addr +#ifdef CONFIG_MODULES_USE_ELF_REL +#define Elf_Rel Elf64_Rel +#endif +#ifdef CONFIG_MODULES_USE_ELF_RELA +#define Elf_Rela Elf64_Rela +#endif +#define ELF_R_TYPE(X) ELF64_R_TYPE(X) +#define ELF_R_SYM(X) ELF64_R_SYM(X) + +#else /* CONFIG_64BIT */ + +#define Elf_Shdr Elf32_Shdr +#define Elf_Phdr Elf32_Phdr +#define Elf_Sym Elf32_Sym +#define Elf_Dyn Elf32_Dyn +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Addr Elf32_Addr +#ifdef CONFIG_MODULES_USE_ELF_REL +#define Elf_Rel Elf32_Rel +#endif +#ifdef CONFIG_MODULES_USE_ELF_RELA +#define Elf_Rela Elf32_Rela +#endif +#define ELF_R_TYPE(X) ELF32_R_TYPE(X) +#define ELF_R_SYM(X) ELF32_R_SYM(X) +#endif + +#endif /* __ASM_GENERIC_MODULE_H */ diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h new file mode 100644 index 000000000..61c58d887 --- /dev/null +++ b/include/asm-generic/msi.h @@ -0,0 +1,32 @@ +#ifndef __ASM_GENERIC_MSI_H +#define __ASM_GENERIC_MSI_H + +#include <linux/types.h> + +#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS +# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 +#endif + +struct msi_desc; + +/** + * struct msi_alloc_info - Default structure for MSI interrupt allocation. + * @desc: Pointer to msi descriptor + * @hwirq: Associated hw interrupt number in the domain + * @scratchpad: Storage for implementation specific scratch data + * + * Architectures can provide their own implementation by not including + * asm-generic/msi.h into their arch specific header file. + */ +typedef struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + union { + unsigned long ul; + void *ptr; + } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; +} msi_alloc_info_t; + +#define GENERIC_MSI_DOMAIN_OPS 1 + +#endif diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h new file mode 100644 index 000000000..d4f9fb4e5 --- /dev/null +++ b/include/asm-generic/mutex-dec.h @@ -0,0 +1,88 @@ +/* + * include/asm-generic/mutex-dec.h + * + * Generic implementation of the mutex fastpath, based on atomic + * decrement/increment. 
+ */ +#ifndef _ASM_GENERIC_MUTEX_DEC_H +#define _ASM_GENERIC_MUTEX_DEC_H + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if + * it wasn't 1 originally. This function MUST leave the value lower than + * 1 even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_dec_return(count) < 0)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * + * Change the count from 1 to a value lower than 1. This function returns 0 + * if the fastpath succeeds, or -1 otherwise. + */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count) +{ + if (unlikely(atomic_dec_return(count) < 0)) + return -1; + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the count from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than 1. + * + * If the implementation sets it to a value of lower than 1, then the + * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs + * to return 0 otherwise. + */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_inc_return(count) <= 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 1 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: fallback function + * + * Change the count from 1 to a value lower than 1, and return 0 (failure) + * if it wasn't 1 originally, or return 1 (success) otherwise. This function + * MUST leave the value lower than 1 even when the "1" assertion wasn't true. + * Additionally, if the value was < 0 originally, this function must not leave + * it to 0 on failure. + * + * If the architecture has no effective trylock variant, it should call the + * <fail_fn> spinlock-based trylock variant unconditionally. + */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + if (likely(atomic_cmpxchg(count, 1, 0) == 1)) + return 1; + return 0; +} + +#endif diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h new file mode 100644 index 000000000..61069ed33 --- /dev/null +++ b/include/asm-generic/mutex-null.h @@ -0,0 +1,19 @@ +/* + * include/asm-generic/mutex-null.h + * + * Generic implementation of the mutex fastpath, based on NOP :-) + * + * This is used by the mutex-debugging infrastructure, but it can also + * be used by architectures that (for whatever reason) want to use the + * spinlock based slowpath. 
+ */ +#ifndef _ASM_GENERIC_MUTEX_NULL_H +#define _ASM_GENERIC_MUTEX_NULL_H + +#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_lock_retval(count) (-1) +#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) +#define __mutex_slowpath_needs_to_unlock() 1 + +#endif diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h new file mode 100644 index 000000000..f169ec064 --- /dev/null +++ b/include/asm-generic/mutex-xchg.h @@ -0,0 +1,116 @@ +/* + * include/asm-generic/mutex-xchg.h + * + * Generic implementation of the mutex fastpath, based on xchg(). + * + * NOTE: An xchg based implementation might be less optimal than an atomic + * decrement/increment based implementation. If your architecture + * has a reasonable atomic dec/inc then you should probably use + * asm-generic/mutex-dec.h instead, or you could open-code an + * optimized version in asm/mutex.h. + */ +#ifndef _ASM_GENERIC_MUTEX_XCHG_H +#define _ASM_GENERIC_MUTEX_XCHG_H + +/** + * __mutex_fastpath_lock - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 1 + * + * Change the count from 1 to a value lower than 1, and call <fail_fn> if it + * wasn't 1 originally. This function MUST leave the value lower than 1 + * even when the "1" assertion wasn't true. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_xchg(count, 0) != 1)) + /* + * We failed to acquire the lock, so mark it contended + * to ensure that any waiting tasks are woken up by the + * unlock slow path. + */ + if (likely(atomic_xchg(count, -1) != 1)) + fail_fn(count); +} + +/** + * __mutex_fastpath_lock_retval - try to take the lock by moving the count + * from 1 to a 0 value + * @count: pointer of type atomic_t + * + * Change the count from 1 to a value lower than 1. This function returns 0 + * if the fastpath succeeds, or -1 otherwise. + */ +static inline int +__mutex_fastpath_lock_retval(atomic_t *count) +{ + if (unlikely(atomic_xchg(count, 0) != 1)) + if (likely(atomic_xchg(count, -1) != 1)) + return -1; + return 0; +} + +/** + * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1 + * @count: pointer of type atomic_t + * @fail_fn: function to call if the original value was not 0 + * + * try to promote the mutex from 0 to 1. if it wasn't 0, call <function> + * In the failure case, this function is allowed to either set the value to + * 1, or to set it to a value lower than one. + * If the implementation sets it to a value of lower than one, the + * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs + * to return 0 otherwise. + */ +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + if (unlikely(atomic_xchg(count, 1) != 0)) + fail_fn(count); +} + +#define __mutex_slowpath_needs_to_unlock() 0 + +/** + * __mutex_fastpath_trylock - try to acquire the mutex, without waiting + * + * @count: pointer of type atomic_t + * @fail_fn: spinlock based trylock implementation + * + * Change the count from 1 to a value lower than 1, and return 0 (failure) + * if it wasn't 1 originally, or return 1 (success) otherwise. This function + * MUST leave the value lower than 1 even when the "1" assertion wasn't true. + * Additionally, if the value was < 0 originally, this function must not leave + * it to 0 on failure. 
+ * + * If the architecture has no effective trylock variant, it should call the + * <fail_fn> spinlock-based trylock variant unconditionally. + */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int prev = atomic_xchg(count, 0); + + if (unlikely(prev < 0)) { + /* + * The lock was marked contended so we must restore that + * state. If while doing so we get back a prev value of 1 + * then we just own it. + * + * [ In the rare case of the mutex going to 1, to 0, to -1 + * and then back to 0 in this few-instructions window, + * this has the potential to trigger the slowpath for the + * owner's unlock path needlessly, but that's not a problem + * in practice. ] + */ + prev = atomic_xchg(count, prev); + if (prev < 0) + prev = 0; + } + + return prev; +} + +#endif diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h new file mode 100644 index 000000000..fe91ab502 --- /dev/null +++ b/include/asm-generic/mutex.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_MUTEX_H +#define __ASM_GENERIC_MUTEX_H +/* + * Pull in the generic implementation for the mutex fastpath, + * which is a reasonable default on many architectures. + */ + +#include <asm-generic/mutex-dec.h> +#endif /* __ASM_GENERIC_MUTEX_H */ diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h new file mode 100644 index 000000000..37d1fe289 --- /dev/null +++ b/include/asm-generic/page.h @@ -0,0 +1,103 @@ +#ifndef __ASM_GENERIC_PAGE_H +#define __ASM_GENERIC_PAGE_H +/* + * Generic page.h implementation, for NOMMU architectures. + * This provides the dummy definitions for the memory management. + */ + +#ifdef CONFIG_MMU +#error need to prove a real asm/page.h +#endif + + +/* PAGE_SHIFT determines the page size */ + +#define PAGE_SHIFT 12 +#ifdef __ASSEMBLY__ +#define PAGE_SIZE (1 << PAGE_SHIFT) +#else +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#endif +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#include <asm/setup.h> + +#ifndef __ASSEMBLY__ + +#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) +#define free_user_page(page, addr) free_page(addr) + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +/* + * These are used to make use of C type-checking.. 
+ */ +typedef struct { + unsigned long pte; +} pte_t; +typedef struct { + unsigned long pmd[16]; +} pmd_t; +typedef struct { + unsigned long pgd; +} pgd_t; +typedef struct { + unsigned long pgprot; +} pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((&x)->pmd[0]) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +extern unsigned long memory_start; +extern unsigned long memory_end; + +#endif /* !__ASSEMBLY__ */ + +#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS +#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) +#else +#define PAGE_OFFSET (0) +#endif + +#ifndef ARCH_PFN_OFFSET +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) +#endif + +#ifndef __ASSEMBLY__ + +#define __va(x) ((void *)((unsigned long) (x))) +#define __pa(x) ((unsigned long) (x)) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr)) +#define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) + +#ifndef page_to_phys +#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) +#endif + +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) + +#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ + ((void *)(kaddr) < (void *)memory_end)) + +#endif /* __ASSEMBLY__ */ + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* __ASM_GENERIC_PAGE_H */ diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h new file mode 100644 index 000000000..04e715bcc --- /dev/null +++ b/include/asm-generic/param.h @@ -0,0 +1,10 @@ +#ifndef __ASM_GENERIC_PARAM_H +#define __ASM_GENERIC_PARAM_H + +#include <uapi/asm-generic/param.h> + +# undef HZ +# define HZ CONFIG_HZ /* Internal kernel timer frequency */ +# define USER_HZ 100 /* some user interfaces are */ +# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ +#endif /* __ASM_GENERIC_PARAM_H */ diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h new file mode 100644 index 000000000..2c9f9d433 --- /dev/null +++ b/include/asm-generic/parport.h @@ -0,0 +1,23 @@ +#ifndef __ASM_GENERIC_PARPORT_H +#define __ASM_GENERIC_PARPORT_H + +/* + * An ISA bus may have i8255 parallel ports at well-known + * locations in the I/O space, which are scanned by + * parport_pc_find_isa_ports. + * + * Without ISA support, the driver will only attach + * to devices on the PCI bus. + */ + +static int parport_pc_find_isa_ports(int autoirq, int autodma); +static int parport_pc_find_nonpci_ports(int autoirq, int autodma) +{ +#ifdef CONFIG_ISA + return parport_pc_find_isa_ports(autoirq, autodma); +#else + return 0; +#endif +} + +#endif /* __ASM_GENERIC_PARPORT_H */ diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h new file mode 100644 index 000000000..20db2e5a0 --- /dev/null +++ b/include/asm-generic/pci-bridge.h @@ -0,0 +1,74 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#ifndef _ASM_GENERIC_PCI_BRIDGE_H +#define _ASM_GENERIC_PCI_BRIDGE_H + +#ifdef __KERNEL__ + +enum { + /* Force re-assigning all resources (ignore firmware + * setup completely) + */ + PCI_REASSIGN_ALL_RSRC = 0x00000001, + + /* Re-assign all bus numbers */ + PCI_REASSIGN_ALL_BUS = 0x00000002, + + /* Do not try to assign, just use existing setup */ + PCI_PROBE_ONLY = 0x00000004, + + /* Don't bother with ISA alignment unless the bridge has + * ISA forwarding enabled + */ + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, + + /* Enable domain numbers in /proc */ + PCI_ENABLE_PROC_DOMAINS = 0x00000010, + /* ... except for domain 0 */ + PCI_COMPAT_DOMAIN_0 = 0x00000020, + + /* PCIe downstream ports are bridges that normally lead to only a + * device 0, but if this is set, we scan all possible devices, not + * just device 0. + */ + PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, +}; + +#ifdef CONFIG_PCI +extern unsigned int pci_flags; + +static inline void pci_set_flags(int flags) +{ + pci_flags = flags; +} + +static inline void pci_add_flags(int flags) +{ + pci_flags |= flags; +} + +static inline void pci_clear_flags(int flags) +{ + pci_flags &= ~flags; +} + +static inline int pci_has_flag(int flag) +{ + return pci_flags & flag; +} +#else +static inline void pci_set_flags(int flags) { } +static inline void pci_add_flags(int flags) { } +static inline void pci_clear_flags(int flags) { } +static inline int pci_has_flag(int flag) +{ + return 0; +} +#endif /* CONFIG_PCI */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_GENERIC_PCI_BRIDGE_H */ diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h new file mode 100644 index 000000000..c110843fc --- /dev/null +++ b/include/asm-generic/pci-dma-compat.h @@ -0,0 +1,124 @@ +/* include this file if the platform implements the dma_ DMA Mapping API + * and wants to provide the pci_ DMA Mapping API in terms of it */ + +#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H +#define _ASM_GENERIC_PCI_DMA_COMPAT_H + +#include <linux/dma-mapping.h> + +static inline int +pci_dma_supported(struct pci_dev *hwdev, u64 mask) +{ + return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask); +} + +static inline void * +pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); +} + +static inline void * +pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, + size, dma_handle, GFP_ATOMIC); +} + +static inline void +pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); +} + +static inline dma_addr_t +pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) +{ + return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); +} + +static inline dma_addr_t +pci_map_page(struct pci_dev *hwdev, struct page *page, + unsigned long offset, size_t size, int direction) +{ + return dma_map_page(hwdev == NULL ? 
NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, + size_t size, int direction) +{ + dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); +} + +static inline int +pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void +pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents, int direction) +{ + dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); +} + +static inline void +pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); +} + +static inline int +pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) +{ + return dma_mapping_error(&pdev->dev, dma_addr); +} + +#ifdef CONFIG_PCI +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_coherent_mask(&dev->dev, mask); +} +#endif + +#endif diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h new file mode 100644 index 000000000..e80a0495e --- /dev/null +++ b/include/asm-generic/pci.h @@ -0,0 +1,37 @@ +/* + * linux/include/asm-generic/pci.h + * + * Copyright (C) 2003 Russell King + */ +#ifndef _ASM_GENERIC_PCI_H +#define _ASM_GENERIC_PCI_H + +static inline struct resource * +pcibios_select_root(struct pci_dev *pdev, struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + +#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return channel ? 15 : 14; +} +#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ + +/* + * By default, assume that no iommu is in use and that the PCI + * space is mapped to address physical 0. + */ +#ifndef PCI_DMA_BUS_IS_PHYS +#define PCI_DMA_BUS_IS_PHYS (1) +#endif + +#endif /* _ASM_GENERIC_PCI_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h new file mode 100644 index 000000000..7389c8711 --- /dev/null +++ b/include/asm-generic/pci_iomap.h @@ -0,0 +1,45 @@ +/* Generic I/O port emulation, based on MN10300 code + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_PCI_IOMAP_H +#define __ASM_GENERIC_PCI_IOMAP_H + +struct pci_dev; +#ifdef CONFIG_PCI +/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +/* Create a virtual mapping cookie for a port on a given PCI device. + * Do not call this directly, it exists to make it easier for architectures + * to override */ +#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP +extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, + unsigned int nr); +#else +#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) +#endif + +#elif defined(CONFIG_GENERIC_PCI_IOMAP) +static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} + +static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} +#endif + +#endif /* __ASM_GENERIC_IO_H */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h new file mode 100644 index 000000000..4d9f233c4 --- /dev/null +++ b/include/asm-generic/percpu.h @@ -0,0 +1,420 @@ +#ifndef _ASM_GENERIC_PERCPU_H_ +#define _ASM_GENERIC_PERCPU_H_ + +#include <linux/compiler.h> +#include <linux/threads.h> +#include <linux/percpu-defs.h> + +#ifdef CONFIG_SMP + +/* + * per_cpu_offset() is the offset that has to be added to a + * percpu variable to get to the instance for a certain processor. + * + * Most arches use the __per_cpu_offset array for those offsets but + * some arches have their own ways of determining the offset (x86_64, s390). + */ +#ifndef __per_cpu_offset +extern unsigned long __per_cpu_offset[NR_CPUS]; + +#define per_cpu_offset(x) (__per_cpu_offset[x]) +#endif + +/* + * Determine the offset for the currently active processor. + * An arch may define __my_cpu_offset to provide a more effective + * means of obtaining the offset to the per cpu variables of the + * current processor. + */ +#ifndef __my_cpu_offset +#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) +#endif +#ifdef CONFIG_DEBUG_PREEMPT +#define my_cpu_offset per_cpu_offset(smp_processor_id()) +#else +#define my_cpu_offset __my_cpu_offset +#endif + +/* + * Arch may define arch_raw_cpu_ptr() to provide more efficient address + * translations for raw_cpu_ptr(). 
+ */ +#ifndef arch_raw_cpu_ptr +#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) +#endif + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +extern void setup_per_cpu_areas(void); +#endif + +#endif /* SMP */ + +#ifndef PER_CPU_BASE_SECTION +#ifdef CONFIG_SMP +#define PER_CPU_BASE_SECTION ".data..percpu" +#else +#define PER_CPU_BASE_SECTION ".data" +#endif +#endif + +#ifndef PER_CPU_ATTRIBUTES +#define PER_CPU_ATTRIBUTES +#endif + +#ifndef PER_CPU_DEF_ATTRIBUTES +#define PER_CPU_DEF_ATTRIBUTES +#endif + +#define raw_cpu_generic_to_op(pcp, val, op) \ +do { \ + *raw_cpu_ptr(&(pcp)) op val; \ +} while (0) + +#define raw_cpu_generic_add_return(pcp, val) \ +({ \ + raw_cpu_add(pcp, val); \ + raw_cpu_read(pcp); \ +}) + +#define raw_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(pcp) __ret; \ + __ret = raw_cpu_read(pcp); \ + raw_cpu_write(pcp, nval); \ + __ret; \ +}) + +#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) __ret; \ + __ret = raw_cpu_read(pcp); \ + if (__ret == (oval)) \ + raw_cpu_write(pcp, nval); \ + __ret; \ +}) + +#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret = 0; \ + if (raw_cpu_read(pcp1) == (oval1) && \ + raw_cpu_read(pcp2) == (oval2)) { \ + raw_cpu_write(pcp1, nval1); \ + raw_cpu_write(pcp2, nval2); \ + __ret = 1; \ + } \ + (__ret); \ +}) + +#define this_cpu_generic_read(pcp) \ +({ \ + typeof(pcp) __ret; \ + preempt_disable(); \ + __ret = *this_cpu_ptr(&(pcp)); \ + preempt_enable(); \ + __ret; \ +}) + +#define this_cpu_generic_to_op(pcp, val, op) \ +do { \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + *raw_cpu_ptr(&(pcp)) op val; \ + raw_local_irq_restore(__flags); \ +} while (0) + +#define this_cpu_generic_add_return(pcp, val) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + raw_cpu_add(pcp, val); \ + __ret = raw_cpu_read(pcp); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_xchg(pcp, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_read(pcp); \ + raw_cpu_write(pcp, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ +({ \ + typeof(pcp) __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_read(pcp); \ + if (__ret == (oval)) \ + raw_cpu_write(pcp, nval); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ \ + int __ret; \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ + oval1, oval2, nval1, nval2); \ + raw_local_irq_restore(__flags); \ + __ret; \ +}) + +#ifndef raw_cpu_read_1 +#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp))) +#endif +#ifndef raw_cpu_read_2 +#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp))) +#endif +#ifndef raw_cpu_read_4 +#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp))) +#endif +#ifndef raw_cpu_read_8 +#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp))) +#endif + +#ifndef raw_cpu_write_1 +#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_2 +#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_4 +#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef raw_cpu_write_8 +#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =) +#endif + 
+#ifndef raw_cpu_add_1 +#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_2 +#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_4 +#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef raw_cpu_add_8 +#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef raw_cpu_and_1 +#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_2 +#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_4 +#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef raw_cpu_and_8 +#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef raw_cpu_or_1 +#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_2 +#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_4 +#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef raw_cpu_or_8 +#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef raw_cpu_add_return_1 +#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_2 +#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_4 +#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif +#ifndef raw_cpu_add_return_8 +#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) +#endif + +#ifndef raw_cpu_xchg_1 +#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_2 +#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_4 +#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif +#ifndef raw_cpu_xchg_8 +#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef raw_cpu_cmpxchg_1 +#define raw_cpu_cmpxchg_1(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_2 +#define raw_cpu_cmpxchg_2(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_4 +#define raw_cpu_cmpxchg_4(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef raw_cpu_cmpxchg_8 +#define raw_cpu_cmpxchg_8(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef raw_cpu_cmpxchg_double_1 +#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_2 +#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_4 +#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef raw_cpu_cmpxchg_double_8 +#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#ifndef this_cpu_read_1 +#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_2 +#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_4 +#define 
this_cpu_read_4(pcp) this_cpu_generic_read(pcp) +#endif +#ifndef this_cpu_read_8 +#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp) +#endif + +#ifndef this_cpu_write_1 +#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_2 +#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_4 +#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif +#ifndef this_cpu_write_8 +#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =) +#endif + +#ifndef this_cpu_add_1 +#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_2 +#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_4 +#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif +#ifndef this_cpu_add_8 +#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=) +#endif + +#ifndef this_cpu_and_1 +#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_2 +#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_4 +#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif +#ifndef this_cpu_and_8 +#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=) +#endif + +#ifndef this_cpu_or_1 +#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_2 +#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_4 +#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif +#ifndef this_cpu_or_8 +#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=) +#endif + +#ifndef this_cpu_add_return_1 +#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_2 +#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_4 +#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif +#ifndef this_cpu_add_return_8 +#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val) +#endif + +#ifndef this_cpu_xchg_1 +#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_2 +#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_4 +#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif +#ifndef this_cpu_xchg_8 +#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) +#endif + +#ifndef this_cpu_cmpxchg_1 +#define this_cpu_cmpxchg_1(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_2 +#define this_cpu_cmpxchg_2(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_4 +#define this_cpu_cmpxchg_4(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif +#ifndef this_cpu_cmpxchg_8 +#define this_cpu_cmpxchg_8(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) +#endif + +#ifndef this_cpu_cmpxchg_double_1 +#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_2 +#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, 
oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_4 +#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif +#ifndef this_cpu_cmpxchg_double_8 +#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#endif + +#endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h new file mode 100644 index 000000000..9e429d08b --- /dev/null +++ b/include/asm-generic/pgalloc.h @@ -0,0 +1,12 @@ +#ifndef __ASM_GENERIC_PGALLOC_H +#define __ASM_GENERIC_PGALLOC_H +/* + * an empty file is enough for a nommu architecture + */ +#ifdef CONFIG_MMU +#error need to implement an architecture specific asm/pgalloc.h +#endif + +#define check_pgt_cache() do { } while (0) + +#endif /* __ASM_GENERIC_PGALLOC_H */ diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h new file mode 100644 index 000000000..725612b79 --- /dev/null +++ b/include/asm-generic/pgtable-nopmd.h @@ -0,0 +1,69 @@ +#ifndef _PGTABLE_NOPMD_H +#define _PGTABLE_NOPMD_H + +#ifndef __ASSEMBLY__ + +#include <asm-generic/pgtable-nopud.h> + +struct mm_struct; + +#define __PAGETABLE_PMD_FOLDED + +/* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into + * without casting. + */ +typedef struct { pud_t pud; } pmd_t; + +#define PMD_SHIFT PUD_SHIFT +#define PTRS_PER_PMD 1 +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded + * into the pud entry) + */ +static inline int pud_none(pud_t pud) { return 0; } +static inline int pud_bad(pud_t pud) { return 0; } +static inline int pud_present(pud_t pud) { return 1; } +static inline void pud_clear(pud_t *pud) { } +#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) + +#define pud_populate(mm, pmd, pte) do { } while (0) + +/* + * (pmds are folded into puds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval }) + +static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) +{ + return (pmd_t *)pud; +} + +#define pmd_val(x) (pud_val((x).pud)) +#define __pmd(x) ((pmd_t) { __pud(x) } ) + +#define pud_page(pud) (pmd_page((pmd_t){ pud })) +#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) + +/* + * allocating and freeing a pmd is trivial: the 1-entry pmd is + * inside the pud, so has no extra memory associated with it. 
+ */ +#define pmd_alloc_one(mm, address) NULL +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ +} +#define __pmd_free_tlb(tlb, x, a) do { } while (0) + +#undef pmd_addr_end +#define pmd_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ + +#endif /* _PGTABLE_NOPMD_H */ diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h new file mode 100644 index 000000000..810431d83 --- /dev/null +++ b/include/asm-generic/pgtable-nopud.h @@ -0,0 +1,61 @@ +#ifndef _PGTABLE_NOPUD_H +#define _PGTABLE_NOPUD_H + +#ifndef __ASSEMBLY__ + +#define __PAGETABLE_PUD_FOLDED + +/* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into + * without casting. + */ +typedef struct { pgd_t pgd; } pud_t; + +#define PUD_SHIFT PGDIR_SHIFT +#define PTRS_PER_PUD 1 +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded + * into the pgd entry) + */ +static inline int pgd_none(pgd_t pgd) { return 0; } +static inline int pgd_bad(pgd_t pgd) { return 0; } +static inline int pgd_present(pgd_t pgd) { return 1; } +static inline void pgd_clear(pgd_t *pgd) { } +#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + +#define pgd_populate(mm, pgd, pud) do { } while (0) +/* + * (puds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) + +static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) +{ + return (pud_t *)pgd; +} + +#define pud_val(x) (pgd_val((x).pgd)) +#define __pud(x) ((pud_t) { __pgd(x) } ) + +#define pgd_page(pgd) (pud_page((pud_t){ pgd })) +#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) + +/* + * allocating and freeing a pud is trivial: the 1-entry pud is + * inside the pgd, so has no extra memory associated with it. + */ +#define pud_alloc_one(mm, address) NULL +#define pud_free(mm, x) do { } while (0) +#define __pud_free_tlb(tlb, x, a) do { } while (0) + +#undef pud_addr_end +#define pud_addr_end(addr, end) (end) + +#endif /* __ASSEMBLY__ */ +#endif /* _PGTABLE_NOPUD_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h new file mode 100644 index 000000000..9cf36952a --- /dev/null +++ b/include/asm-generic/pgtable.h @@ -0,0 +1,743 @@ +#ifndef _ASM_GENERIC_PGTABLE_H +#define _ASM_GENERIC_PGTABLE_H + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_MMU + +#include <linux/mm_types.h> +#include <linux/bug.h> +#include <linux/errno.h> + +#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \ + CONFIG_PGTABLE_LEVELS +#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED +#endif + +/* + * On almost all architectures and configurations, 0 can be used as the + * upper ceiling to free_pgtables(): on many architectures it has the same + * effect as using TASK_SIZE. However, there is one configuration which + * must impose a more careful limit, to avoid freeing kernel pgtables. 
+ */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING 0UL +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + int r = 1; + if (!pte_young(pte)) + r = 0; + else + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); + return r; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + int r = 1; + if (!pmd_young(pmd)) + r = 0; + else + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); + return r; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, address, ptep); + return pte; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + pmd_clear(pmdp); + return pmd; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp, + int full) +{ + return pmdp_get_and_clear(mm, address, pmdp); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + int full) +{ + pte_t pte; + pte = ptep_get_and_clear(mm, address, ptep); + return pte; +} +#endif + +/* + * Some architectures may be able to avoid expensive synchronization + * primitives when modifications are made to PTE's which are already + * not present, or in the process of an address space destruction. 
+ */ +#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +static inline void pte_clear_not_present_full(struct mm_struct *mm, + unsigned long address, + pte_t *ptep, + int full) +{ + pte_clear(mm, address, ptep); +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH +extern pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH +extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT +struct mm_struct; +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +{ + pte_t old_pte = *ptep; + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); +} +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#endif + +#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PMDP_INVALIDATE +extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return pte_val(pte_a) == pte_val(pte_b); +} +#endif + +#ifndef __HAVE_ARCH_PTE_UNUSED +/* + * Some architectures provide facilities to virtualization guests + * so that they can flag allocated pages as unused. This allows the + * host to transparently reclaim unused pages. This function returns + * whether the pte's page is unused. 
+ */ +static inline int pte_unused(pte_t pte) +{ + return 0; +} +#endif + +#ifndef __HAVE_ARCH_PMD_SAME +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return pmd_val(pmd_a) == pmd_val(pmd_b); +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PGD_OFFSET_GATE +#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) +#endif + +#ifndef __HAVE_ARCH_MOVE_PTE +#define move_pte(pte, prot, old_addr, new_addr) (pte) +#endif + +#ifndef pte_accessible +# define pte_accessible(mm, pte) ((void)(pte), 1) +#endif + +#ifndef flush_tlb_fix_spurious_fault +#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif + +/* + * When walking page tables, get the address of the next boundary, + * or the end address of the range if that comes earlier. Although no + * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. + */ + +#define pgd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#ifndef pud_addr_end +#define pud_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +#ifndef pmd_addr_end +#define pmd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) +#endif + +/* + * When walking page tables, we usually want to skip any p?d_none entries; + * and any p?d_bad entries - reporting the error before resetting to none. + * Do the tests inline, but report and clear the bad entry in mm/memory.c. + */ +void pgd_clear_bad(pgd_t *); +void pud_clear_bad(pud_t *); +void pmd_clear_bad(pmd_t *); + +static inline int pgd_none_or_clear_bad(pgd_t *pgd) +{ + if (pgd_none(*pgd)) + return 1; + if (unlikely(pgd_bad(*pgd))) { + pgd_clear_bad(pgd); + return 1; + } + return 0; +} + +static inline int pud_none_or_clear_bad(pud_t *pud) +{ + if (pud_none(*pud)) + return 1; + if (unlikely(pud_bad(*pud))) { + pud_clear_bad(pud); + return 1; + } + return 0; +} + +static inline int pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + /* + * Get the current pte state, but zero it out to make it + * non-present, preventing the hardware from asynchronously + * updating it. 
+ */ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline void __ptep_modify_prot_commit(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * The pte is non-present, so there's no hardware state to + * preserve. + */ + set_pte_at(mm, addr, ptep, pte); +} + +#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION +/* + * Start a pte protection read-modify-write transaction, which + * protects against asynchronous hardware modifications to the pte. + * The intention is not to prevent the hardware from making pte + * updates, but to prevent any updates it may make from being lost. + * + * This does not protect against other software modifications of the + * pte; the appropriate pte lock must be held over the transation. + * + * Note that this interface is intended to be batchable, meaning that + * ptep_modify_prot_commit may not actually update the pte, but merely + * queue the update to be done at some later time. The update must be + * actually committed before the pte lock is released, however. + */ +static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + return __ptep_modify_prot_start(mm, addr, ptep); +} + +/* + * Commit an update to a pte, leaving any hardware-controlled bits in + * the PTE unmodified. + */ +static inline void ptep_modify_prot_commit(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, pte_t pte) +{ + __ptep_modify_prot_commit(mm, addr, ptep, pte); +} +#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ +#endif /* CONFIG_MMU */ + +/* + * A facility to provide lazy MMU batching. This allows PTE updates and + * page invalidations to be delayed until a call to leave lazy MMU mode + * is issued. Some architectures may benefit from doing this, and it is + * beneficial for both shadow and direct mode hypervisors, which may batch + * the PTE updates which happen during this window. Note that using this + * interface requires that read hazards be removed from the code. A read + * hazard could result in the direct mode hypervisor case, since the actual + * write to the page tables may not yet have taken place, so reads though + * a raw PTE pointer after it has been modified are not guaranteed to be + * up to date. This mode can only be entered and left under the protection of + * the page table locks for all page tables which may be modified. In the UP + * case, this is required so that preemption is disabled, and in the SMP case, + * it must synchronize the delayed page table writes properly on other CPUs. + */ +#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE +#define arch_enter_lazy_mmu_mode() do {} while (0) +#define arch_leave_lazy_mmu_mode() do {} while (0) +#define arch_flush_lazy_mmu_mode() do {} while (0) +#endif + +/* + * A facility to provide batching of the reload of page tables and + * other process state with the actual context switch code for + * paravirtualized guests. By convention, only one of the batched + * update (lazy) modes (CPU, MMU) should be active at any given time, + * entry should never be nested, and entry and exits should always be + * paired. This is for sanity of maintaining and reasoning about the + * kernel code. In this case, the exit (end of the context switch) is + * in architecture-specific code, and so doesn't need a generic + * definition. 
+ */ +#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH +#define arch_start_context_switch(prev) do {} while (0) +#endif + +#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY +static inline int pte_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return 0; +} + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd; +} + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} +#endif + +#ifndef __HAVE_PFNMAP_TRACKING +/* + * Interfaces that can be used by architecture code to keep track of + * memory type of pfn mappings specified by the remap_pfn_range, + * vm_insert_pfn. + */ + +/* + * track_pfn_remap is called when a _new_ pfn mapping is being established + * by remap_pfn_range() for physical range indicated by pfn and size. + */ +static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size) +{ + return 0; +} + +/* + * track_pfn_insert is called when a _new_ single pfn is established + * by vm_insert_pfn(). + */ +static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn) +{ + return 0; +} + +/* + * track_pfn_copy is called when vma that is covering the pfnmap gets + * copied through copy_page_range(). + */ +static inline int track_pfn_copy(struct vm_area_struct *vma) +{ + return 0; +} + +/* + * untrack_pfn_vma is called while unmapping a pfnmap for a region. + * untrack can be called for a specific region indicated by pfn and size or + * can be for the entire vma (in which case pfn, size are zero). 
+ */ +static inline void untrack_pfn(struct vm_area_struct *vma, + unsigned long pfn, unsigned long size) +{ +} +#else +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size); +extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn); +extern int track_pfn_copy(struct vm_area_struct *vma); +extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); +#endif + +#ifdef CONFIG_UKSM +static inline int is_uksm_zero_pfn(unsigned long pfn) +{ + extern unsigned long uksm_zero_pfn; + return pfn == uksm_zero_pfn; +} +#else +static inline int is_uksm_zero_pfn(unsigned long pfn) +{ + return 0; +} +#endif + +#ifdef __HAVE_COLOR_ZERO_PAGE +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn); +} + +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn)); +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + extern unsigned long zero_pfn; + return zero_pfn; +} +#endif + +#ifdef CONFIG_MMU + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return 0; +} +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return 0; +} +#ifndef __HAVE_ARCH_PMD_WRITE +static inline int pmd_write(pmd_t pmd) +{ + BUG(); + return 0; +} +#endif /* __HAVE_ARCH_PMD_WRITE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef pmd_read_atomic +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + /* + * Depend on compiler for an atomic pmd read. NOTE: this is + * only going to work, if the pmdval_t isn't larger than + * an unsigned long. + */ + return *pmdp; +} +#endif + +#ifndef pmd_move_must_withdraw +static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, + spinlock_t *old_pmd_ptl) +{ + /* + * With split pmd lock we also need to move preallocated + * PTE page table if new_pmd is on different PMD page table. + */ + return new_pmd_ptl != old_pmd_ptl; +} +#endif + +/* + * This function is meant to be used by sites walking pagetables with + * the mmap_sem hold in read mode to protect against MADV_DONTNEED and + * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd + * into a null pmd and the transhuge page fault can convert a null pmd + * into an hugepmd or into a regular pmd (if the hugepage allocation + * fails). While holding the mmap_sem in read mode the pmd becomes + * stable and stops changing under us only if it's not null and not a + * transhuge pmd. When those races occurs and this function makes a + * difference vs the standard pmd_none_or_clear_bad, the result is + * undefined so behaving like if the pmd was none is safe (because it + * can return none anyway). The compiler level barrier() is critically + * important to compute the two checks atomically on the same pmdval. + * + * For 32bit kernels with a 64bit large pmd_t this automatically takes + * care of reading the pmd atomically to avoid SMP race conditions + * against pmd_populate() when the mmap_sem is hold for reading by the + * caller (a special atomic read not done by "gcc" as in the generic + * version above, is also needed when THP is disabled because the page + * fault can populate the pmd from under us). 
+ */ +static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) +{ + pmd_t pmdval = pmd_read_atomic(pmd); + /* + * The barrier will stabilize the pmdval in a register or on + * the stack so that it will stop changing under the code. + * + * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, + * pmd_read_atomic is allowed to return a not atomic pmdval + * (for example pointing to an hugepage that has never been + * mapped in the pmd). The below checks will only care about + * the low part of the pmd with 32bit PAE x86 anyway, with the + * exception of pmd_none(). So the important thing is that if + * the low part of the pmd is found null, the high part will + * be also null or the pmd_none() check below would be + * confused. + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier(); +#endif + if (pmd_none(pmdval) || pmd_trans_huge(pmdval)) + return 1; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +/* + * This is a noop if Transparent Hugepage Support is not built into + * the kernel. Otherwise it is equivalent to + * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in + * places that already verified the pmd is not none and they want to + * walk ptes while holding the mmap sem in read mode (write mode don't + * need this). If THP is not enabled, the pmd can't go away under the + * code even if MADV_DONTNEED runs, but if THP is enabled we need to + * run a pmd_trans_unstable before walking the ptes after + * split_huge_page_pmd returns (because it may have run when the pmd + * become null, but then a page fault can map in a THP and not a + * regular page). + */ +static inline int pmd_trans_unstable(pmd_t *pmd) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return pmd_none_or_trans_huge_or_clear_bad(pmd); +#else + return 0; +#endif +} + +#ifndef CONFIG_NUMA_BALANCING +/* + * Technically a PTE can be PROTNONE even when not doing NUMA balancing but + * the only case the kernel cares is for NUMA balancing and is only ever set + * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked + * _PAGE_PROTNONE so by by default, implement the helper as "always no". It + * is the responsibility of the caller to distinguish between PROT_NONE + * protections and NUMA hinting fault protections. 
+ */ +static inline int pte_protnone(pte_t pte) +{ + return 0; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return 0; +} +#endif /* CONFIG_NUMA_BALANCING */ + +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); +int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); +int pmd_clear_huge(pmd_t *pmd); +#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pud_clear_huge(pud_t *pud) +{ + return 0; +} +static inline int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + +#endif /* !__ASSEMBLY__ */ + +#ifndef io_remap_pfn_range +#define io_remap_pfn_range remap_pfn_range +#endif + +#endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h new file mode 100644 index 000000000..eb6f9e6c3 --- /dev/null +++ b/include/asm-generic/preempt.h @@ -0,0 +1,89 @@ +#ifndef __ASM_PREEMPT_H +#define __ASM_PREEMPT_H + +#include <linux/thread_info.h> + +#define PREEMPT_ENABLED (0) + +static __always_inline int preempt_count(void) +{ + return current_thread_info()->preempt_count; +} + +static __always_inline int *preempt_count_ptr(void) +{ + return ¤t_thread_info()->preempt_count; +} + +static __always_inline void preempt_count_set(int pc) +{ + *preempt_count_ptr() = pc; +} + +/* + * must be macros to avoid header recursion hell + */ +#define init_task_preempt_count(p) do { \ + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ +} while (0) + +#define init_idle_preempt_count(p, cpu) do { \ + task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \ +} while (0) + +static __always_inline void set_preempt_need_resched(void) +{ +} + +static __always_inline void clear_preempt_need_resched(void) +{ +} + +static __always_inline bool test_preempt_need_resched(void) +{ + return false; +} + +/* + * The various preempt_count add/sub methods + */ + +static __always_inline void __preempt_count_add(int val) +{ + *preempt_count_ptr() += val; +} + +static __always_inline void __preempt_count_sub(int val) +{ + *preempt_count_ptr() -= val; +} + +static __always_inline bool __preempt_count_dec_and_test(void) +{ + /* + * Because of load-store architectures cannot do per-cpu atomic + * operations; we cannot use PREEMPT_NEED_RESCHED because it might get + * lost. + */ + return !--*preempt_count_ptr() && tif_need_resched(); +} + +/* + * Returns true when we need to resched and can (barring IRQ state). + */ +static __always_inline bool should_resched(void) +{ + return unlikely(!preempt_count() && tif_need_resched()); +} + +#ifdef CONFIG_PREEMPT +extern asmlinkage void preempt_schedule(void); +#define __preempt_schedule() preempt_schedule() + +#ifdef CONFIG_CONTEXT_TRACKING +extern asmlinkage void preempt_schedule_context(void); +#define __preempt_schedule_context() preempt_schedule_context() +#endif +#endif /* CONFIG_PREEMPT */ + +#endif /* __ASM_PREEMPT_H */ diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h new file mode 100644 index 000000000..82e674f6b --- /dev/null +++ b/include/asm-generic/ptrace.h @@ -0,0 +1,74 @@ +/* + * Common low level (register) ptrace helpers + * + * Copyright 2004-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __ASM_GENERIC_PTRACE_H__ +#define __ASM_GENERIC_PTRACE_H__ + +#ifndef __ASSEMBLY__ + +/* Helpers for working with the instruction pointer */ +#ifndef GET_IP +#define GET_IP(regs) ((regs)->pc) +#endif +#ifndef SET_IP +#define SET_IP(regs, val) (GET_IP(regs) = (val)) +#endif + +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return GET_IP(regs); +} +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_IP(regs, val); +} + +#ifndef profile_pc +#define profile_pc(regs) instruction_pointer(regs) +#endif + +/* Helpers for working with the user stack pointer */ +#ifndef GET_USP +#define GET_USP(regs) ((regs)->usp) +#endif +#ifndef SET_USP +#define SET_USP(regs, val) (GET_USP(regs) = (val)) +#endif + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return GET_USP(regs); +} +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_USP(regs, val); +} + +/* Helpers for working with the frame pointer */ +#ifndef GET_FP +#define GET_FP(regs) ((regs)->fp) +#endif +#ifndef SET_FP +#define SET_FP(regs, val) (GET_FP(regs) = (val)) +#endif + +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return GET_FP(regs); +} +static inline void frame_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + SET_FP(regs, val); +} + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h new file mode 100644 index 000000000..6383d54bf --- /dev/null +++ b/include/asm-generic/qrwlock.h @@ -0,0 +1,166 @@ +/* + * Queue read/write lock + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P. + * + * Authors: Waiman Long <waiman.long@hp.com> + */ +#ifndef __ASM_GENERIC_QRWLOCK_H +#define __ASM_GENERIC_QRWLOCK_H + +#include <linux/atomic.h> +#include <asm/barrier.h> +#include <asm/processor.h> + +#include <asm-generic/qrwlock_types.h> + +/* + * Writer states & reader shift and bias + */ +#define _QW_WAITING 1 /* A writer is waiting */ +#define _QW_LOCKED 0xff /* A writer holds the lock */ +#define _QW_WMASK 0xff /* Writer mask */ +#define _QR_SHIFT 8 /* Reader count shift */ +#define _QR_BIAS (1U << _QR_SHIFT) + +/* + * External function declarations + */ +extern void queue_read_lock_slowpath(struct qrwlock *lock); +extern void queue_write_lock_slowpath(struct qrwlock *lock); + +/** + * queue_read_can_lock- would read_trylock() succeed? + * @lock: Pointer to queue rwlock structure + */ +static inline int queue_read_can_lock(struct qrwlock *lock) +{ + return !(atomic_read(&lock->cnts) & _QW_WMASK); +} + +/** + * queue_write_can_lock- would write_trylock() succeed? 
+ * @lock: Pointer to queue rwlock structure + */ +static inline int queue_write_can_lock(struct qrwlock *lock) +{ + return !atomic_read(&lock->cnts); +} + +/** + * queue_read_trylock - try to acquire read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queue_read_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (likely(!(cnts & _QW_WMASK))) { + cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts); + if (likely(!(cnts & _QW_WMASK))) + return 1; + atomic_sub(_QR_BIAS, &lock->cnts); + } + return 0; +} + +/** + * queue_write_trylock - try to acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock acquired, 0 if failed + */ +static inline int queue_write_trylock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_read(&lock->cnts); + if (unlikely(cnts)) + return 0; + + return likely(atomic_cmpxchg(&lock->cnts, + cnts, cnts | _QW_LOCKED) == cnts); +} +/** + * queue_read_lock - acquire read lock of a queue rwlock + * @lock: Pointer to queue rwlock structure + */ +static inline void queue_read_lock(struct qrwlock *lock) +{ + u32 cnts; + + cnts = atomic_add_return(_QR_BIAS, &lock->cnts); + if (likely(!(cnts & _QW_WMASK))) + return; + + /* The slowpath will decrement the reader count, if necessary. */ + queue_read_lock_slowpath(lock); +} + +/** + * queue_write_lock - acquire write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queue_write_lock(struct qrwlock *lock) +{ + /* Optimize for the unfair lock case where the fair flag is 0. */ + if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) + return; + + queue_write_lock_slowpath(lock); +} + +/** + * queue_read_unlock - release read lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queue_read_unlock(struct qrwlock *lock) +{ + /* + * Atomically decrement the reader count + */ + smp_mb__before_atomic(); + atomic_sub(_QR_BIAS, &lock->cnts); +} + +#ifndef queue_write_unlock +/** + * queue_write_unlock - release write lock of a queue rwlock + * @lock : Pointer to queue rwlock structure + */ +static inline void queue_write_unlock(struct qrwlock *lock) +{ + /* + * If the writer field is atomic, it can be cleared directly. + * Otherwise, an atomic subtraction will be used to clear it. + */ + smp_mb__before_atomic(); + atomic_sub(_QW_LOCKED, &lock->cnts); +} +#endif + +/* + * Remapping rwlock architecture specific functions to the corresponding + * queue rwlock functions. 
+ */ +#define arch_read_can_lock(l) queue_read_can_lock(l) +#define arch_write_can_lock(l) queue_write_can_lock(l) +#define arch_read_lock(l) queue_read_lock(l) +#define arch_write_lock(l) queue_write_lock(l) +#define arch_read_trylock(l) queue_read_trylock(l) +#define arch_write_trylock(l) queue_write_trylock(l) +#define arch_read_unlock(l) queue_read_unlock(l) +#define arch_write_unlock(l) queue_write_unlock(l) + +#endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h new file mode 100644 index 000000000..4d76f24df --- /dev/null +++ b/include/asm-generic/qrwlock_types.h @@ -0,0 +1,21 @@ +#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H +#define __ASM_GENERIC_QRWLOCK_TYPES_H + +#include <linux/types.h> +#include <asm/spinlock_types.h> + +/* + * The queue read/write lock data structure + */ + +typedef struct qrwlock { + atomic_t cnts; + arch_spinlock_t lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { \ + .cnts = ATOMIC_INIT(0), \ + .lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +} + +#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h new file mode 100644 index 000000000..5e752b959 --- /dev/null +++ b/include/asm-generic/resource.h @@ -0,0 +1,30 @@ +#ifndef _ASM_GENERIC_RESOURCE_H +#define _ASM_GENERIC_RESOURCE_H + +#include <uapi/asm-generic/resource.h> + + +/* + * boot-time rlimit defaults for the init task: + */ +#define INIT_RLIMITS \ +{ \ + [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ + [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ + [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_NPROC] = { 0, 0 }, \ + [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \ + [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ + [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_SIGPENDING] = { 0, 0 }, \ + [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ + [RLIMIT_NICE] = { 0, 0 }, \ + [RLIMIT_RTPRIO] = { 0, 0 }, \ + [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ +} + +#endif diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h new file mode 100644 index 000000000..fa86f240c --- /dev/null +++ b/include/asm-generic/rtc.h @@ -0,0 +1,218 @@ +/* + * include/asm-generic/rtc.h + * + * Author: Tom Rini <trini@mvista.com> + * + * Based on: + * drivers/char/rtc.c + * + * Please read the COPYING file for all license details. + */ + +#ifndef __ASM_RTC_H__ +#define __ASM_RTC_H__ + +#include <linux/mc146818rtc.h> +#include <linux/rtc.h> +#include <linux/bcd.h> +#include <linux/delay.h> + +#define RTC_PIE 0x40 /* periodic interrupt enable */ +#define RTC_AIE 0x20 /* alarm interrupt enable */ +#define RTC_UIE 0x10 /* update-finished interrupt enable */ + +/* some dummy definitions */ +#define RTC_BATT_BAD 0x100 /* battery bad */ +#define RTC_SQWE 0x08 /* enable square-wave output */ +#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +#define RTC_DST_EN 0x01 /* auto switch DST - works f. 
USA only */ + +/* + * Returns true if a clock update is in progress + */ +static inline unsigned char rtc_is_updating(void) +{ + unsigned char uip; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); + spin_unlock_irqrestore(&rtc_lock, flags); + return uip; +} + +static inline unsigned int __get_rtc_time(struct rtc_time *time) +{ + unsigned char ctrl; + unsigned long flags; + +#ifdef CONFIG_MACH_DECSTATION + unsigned int real_year; +#endif + + /* + * read RTC once any update in progress is done. The update + * can take just over 2ms. We wait 20ms. There is no need to + * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. + * If you need to know *exactly* when a second has started, enable + * periodic update complete interrupts, (via ioctl) and then + * immediately read /dev/rtc which will block until you get the IRQ. + * Once the read clears, read the RTC time (again via ioctl). Easy. + */ + if (rtc_is_updating()) + mdelay(20); + + /* + * Only the values that we read from the RTC are set. We leave + * tm_wday, tm_yday and tm_isdst untouched. Even though the + * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated + * by the RTC when initially set to a non-zero value. + */ + spin_lock_irqsave(&rtc_lock, flags); + time->tm_sec = CMOS_READ(RTC_SECONDS); + time->tm_min = CMOS_READ(RTC_MINUTES); + time->tm_hour = CMOS_READ(RTC_HOURS); + time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); + time->tm_mon = CMOS_READ(RTC_MONTH); + time->tm_year = CMOS_READ(RTC_YEAR); +#ifdef CONFIG_MACH_DECSTATION + real_year = CMOS_READ(RTC_DEC_YEAR); +#endif + ctrl = CMOS_READ(RTC_CONTROL); + spin_unlock_irqrestore(&rtc_lock, flags); + + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + { + time->tm_sec = bcd2bin(time->tm_sec); + time->tm_min = bcd2bin(time->tm_min); + time->tm_hour = bcd2bin(time->tm_hour); + time->tm_mday = bcd2bin(time->tm_mday); + time->tm_mon = bcd2bin(time->tm_mon); + time->tm_year = bcd2bin(time->tm_year); + } + +#ifdef CONFIG_MACH_DECSTATION + time->tm_year += real_year - 72; +#endif + + /* + * Account for differences between how the RTC uses the values + * and how they are defined in a struct rtc_time; + */ + if (time->tm_year <= 69) + time->tm_year += 100; + + time->tm_mon--; + + return RTC_24H; +} + +#ifndef get_rtc_time +#define get_rtc_time __get_rtc_time +#endif + +/* Set the current date and time in the real time clock. */ +static inline int __set_rtc_time(struct rtc_time *time) +{ + unsigned long flags; + unsigned char mon, day, hrs, min, sec; + unsigned char save_control, save_freq_select; + unsigned int yrs; +#ifdef CONFIG_MACH_DECSTATION + unsigned int real_yrs, leap_yr; +#endif + + yrs = time->tm_year; + mon = time->tm_mon + 1; /* tm_mon starts at zero */ + day = time->tm_mday; + hrs = time->tm_hour; + min = time->tm_min; + sec = time->tm_sec; + + if (yrs > 255) /* They are unsigned */ + return -EINVAL; + + spin_lock_irqsave(&rtc_lock, flags); +#ifdef CONFIG_MACH_DECSTATION + real_yrs = yrs; + leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) || + !((yrs + 1900) % 400)); + yrs = 72; + + /* + * We want to keep the year set to 73 until March + * for non-leap years, so that Feb, 29th is handled + * correctly. + */ + if (!leap_yr && mon < 3) { + real_yrs--; + yrs = 73; + } +#endif + /* These limits and adjustments are independent of + * whether the chip is in binary mode or not. 
+ */ + if (yrs > 169) { + spin_unlock_irqrestore(&rtc_lock, flags); + return -EINVAL; + } + + if (yrs >= 100) + yrs -= 100; + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) + || RTC_ALWAYS_BCD) { + sec = bin2bcd(sec); + min = bin2bcd(min); + hrs = bin2bcd(hrs); + day = bin2bcd(day); + mon = bin2bcd(mon); + yrs = bin2bcd(yrs); + } + + save_control = CMOS_READ(RTC_CONTROL); + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + +#ifdef CONFIG_MACH_DECSTATION + CMOS_WRITE(real_yrs, RTC_DEC_YEAR); +#endif + CMOS_WRITE(yrs, RTC_YEAR); + CMOS_WRITE(mon, RTC_MONTH); + CMOS_WRITE(day, RTC_DAY_OF_MONTH); + CMOS_WRITE(hrs, RTC_HOURS); + CMOS_WRITE(min, RTC_MINUTES); + CMOS_WRITE(sec, RTC_SECONDS); + + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + + spin_unlock_irqrestore(&rtc_lock, flags); + + return 0; +} + +#ifndef set_rtc_time +#define set_rtc_time __set_rtc_time +#endif + +static inline unsigned int get_rtc_ss(void) +{ + struct rtc_time h; + + get_rtc_time(&h); + return h.tm_sec; +} + +static inline int get_rtc_pll(struct rtc_pll_info *pll) +{ + return -EINVAL; +} +static inline int set_rtc_pll(struct rtc_pll_info *pll) +{ + return -EINVAL; +} + +#endif /* __ASM_RTC_H__ */ diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h new file mode 100644 index 000000000..d48bf5a95 --- /dev/null +++ b/include/asm-generic/rwsem.h @@ -0,0 +1,132 @@ +#ifndef _ASM_GENERIC_RWSEM_H +#define _ASM_GENERIC_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." +#endif + +#ifdef __KERNEL__ + +/* + * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. + * Adapted largely from include/asm-i386/rwsem.h + * by Paul Mackerras <paulus@samba.org>. 
+ */ + +/* + * the semaphore definition + */ +#ifdef CONFIG_64BIT +# define RWSEM_ACTIVE_MASK 0xffffffffL +#else +# define RWSEM_ACTIVE_MASK 0x0000ffffL +#endif + +#define RWSEM_UNLOCKED_VALUE 0x00000000L +#define RWSEM_ACTIVE_BIAS 0x00000001L +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0)) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + long tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +{ + long tmp; + + tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); +} + +static inline void __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_dec_return((atomic_long_t *)&sem->count); + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_long_t *)&sem->count) < 0)) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) +{ + atomic_long_add(delta, (atomic_long_t *)&sem->count); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS, + (atomic_long_t *)&sem->count); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +/* + * implement exchange and add functionality + */ +static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) +{ + return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h new file mode 100644 index 000000000..5de07355f --- /dev/null +++ b/include/asm-generic/scatterlist.h @@ -0,0 +1,34 @@ +#ifndef __ASM_GENERIC_SCATTERLIST_H +#define __ASM_GENERIC_SCATTERLIST_H + +#include <linux/types.h> + +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + unsigned int dma_length; +#endif +}; + +/* + * These macros should be used after a dma_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. 
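+ *
+ * A minimal sketch of that pattern (program_hw_desc() is a made-up
+ * driver hook, for_each_sg() comes from <linux/scatterlist.h>):
+ *
+ *	struct scatterlist *sg;
+ *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *
+ *	for_each_sg(sgl, sg, count, i)
+ *		program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));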
+ */ +#define sg_dma_address(sg) ((sg)->dma_address) + +#ifdef CONFIG_NEED_SG_DMA_LENGTH +#define sg_dma_len(sg) ((sg)->dma_length) +#else +#define sg_dma_len(sg) ((sg)->length) +#endif + +#endif /* __ASM_GENERIC_SCATTERLIST_H */ diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h new file mode 100644 index 000000000..c9ccafa0d --- /dev/null +++ b/include/asm-generic/seccomp.h @@ -0,0 +1,32 @@ +/* + * include/asm-generic/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_GENERIC_SECCOMP_H +#define _ASM_GENERIC_SECCOMP_H + +#include <linux/unistd.h> + +#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32) +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#ifndef __NR_seccomp_sigreturn_32 +#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn +#endif +#endif /* CONFIG_COMPAT && ! already defined */ + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#ifndef __NR_seccomp_sigreturn +#define __NR_seccomp_sigreturn __NR_rt_sigreturn +#endif + +#endif /* _ASM_GENERIC_SECCOMP_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h new file mode 100644 index 000000000..b58fd667f --- /dev/null +++ b/include/asm-generic/sections.h @@ -0,0 +1,66 @@ +#ifndef _ASM_GENERIC_SECTIONS_H_ +#define _ASM_GENERIC_SECTIONS_H_ + +/* References to section boundaries */ + +#include <linux/compiler.h> + +/* + * Usage guidelines: + * _text, _data: architecture specific, don't use them in arch-independent code + * [_stext, _etext]: contains .text.* sections, may also contain .rodata.* + * and/or .init.* sections + * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* + * and/or .init.* sections. + * [__start_rodata, __end_rodata]: contains .rodata.* sections + * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* + * may be out of this range on some architectures. + * [_sinittext, _einittext]: contains .init.text.* sections + * [__bss_start, __bss_stop]: contains BSS sections + * + * Following global variables are optional and may be unavailable on some + * architectures and/or kernel configurations. + * _text, _data + * __kprobes_text_start, __kprobes_text_end + * __entry_text_start, __entry_text_end + * __ctors_start, __ctors_end + */ +extern char _text[], _stext[], _etext[]; +extern char _data[], _sdata[], _edata[]; +extern char __bss_start[], __bss_stop[]; +extern char __init_begin[], __init_end[]; +extern char _sinittext[], _einittext[]; +extern char _end[]; +extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; +extern char __kprobes_text_start[], __kprobes_text_end[]; +extern char __entry_text_start[], __entry_text_end[]; +extern char __start_rodata[], __end_rodata[]; + +/* Start and end of .ctors section - used for constructor calls. */ +extern char __ctors_start[], __ctors_end[]; + +extern __visible const void __nosave_begin, __nosave_end; + +/* function descriptor handling (if any). Override + * in asm/sections.h */ +#ifndef dereference_function_descriptor +#define dereference_function_descriptor(p) (p) +#endif + +/* random extra sections (if any). 
Override + * in asm/sections.h */ +#ifndef arch_is_kernel_text +static inline int arch_is_kernel_text(unsigned long addr) +{ + return 0; +} +#endif + +#ifndef arch_is_kernel_data +static inline int arch_is_kernel_data(unsigned long addr) +{ + return 0; +} +#endif + +#endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h new file mode 100644 index 000000000..5580eace6 --- /dev/null +++ b/include/asm-generic/segment.h @@ -0,0 +1,9 @@ +#ifndef __ASM_GENERIC_SEGMENT_H +#define __ASM_GENERIC_SEGMENT_H +/* + * Only here because we have some old header files that expect it... + * + * New architectures probably don't want to have their own version. + */ + +#endif /* __ASM_GENERIC_SEGMENT_H */ diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h new file mode 100644 index 000000000..5e291090f --- /dev/null +++ b/include/asm-generic/serial.h @@ -0,0 +1,13 @@ +#ifndef __ASM_GENERIC_SERIAL_H +#define __ASM_GENERIC_SERIAL_H + +/* + * This should not be an architecture specific #define, oh well. + * + * Traditionally, it just describes i8250 and related serial ports + * that have this clock rate. + */ + +#define BASE_BAUD (1843200 / 16) + +#endif /* __ASM_GENERIC_SERIAL_H */ diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h new file mode 100644 index 000000000..3d1a3af5c --- /dev/null +++ b/include/asm-generic/siginfo.h @@ -0,0 +1,37 @@ +#ifndef _ASM_GENERIC_SIGINFO_H +#define _ASM_GENERIC_SIGINFO_H + +#include <uapi/asm-generic/siginfo.h> + +#define __SI_MASK 0xffff0000u +#define __SI_KILL (0 << 16) +#define __SI_TIMER (1 << 16) +#define __SI_POLL (2 << 16) +#define __SI_FAULT (3 << 16) +#define __SI_CHLD (4 << 16) +#define __SI_RT (5 << 16) +#define __SI_MESGQ (6 << 16) +#define __SI_SYS (7 << 16) +#define __SI_CODE(T,N) ((T) | ((N) & 0xffff)) + +struct siginfo; +void do_schedule_next_timer(struct siginfo *info); + +#ifndef HAVE_ARCH_COPY_SIGINFO + +#include <linux/string.h> + +static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(*to)); + else + /* _sigchld is currently the largest know union member */ + memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); +} + +#endif + +extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); + +#endif diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h new file mode 100644 index 000000000..d840c90a1 --- /dev/null +++ b/include/asm-generic/signal.h @@ -0,0 +1,14 @@ +#ifndef __ASM_GENERIC_SIGNAL_H +#define __ASM_GENERIC_SIGNAL_H + +#include <uapi/asm-generic/signal.h> + +#ifndef __ASSEMBLY__ +#ifdef SA_RESTORER +#endif + +#include <asm/sigcontext.h> +#undef __HAVE_ARCH_SIG_BITOPS + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_GENERIC_SIGNAL_H */ diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h new file mode 100644 index 000000000..f57eb7b5c --- /dev/null +++ b/include/asm-generic/simd.h @@ -0,0 +1,14 @@ + +#include <linux/hardirq.h> + +/* + * may_use_simd - whether it is allowable at this time to issue SIMD + * instructions or access the SIMD register file + * + * As architectures typically don't preserve the SIMD register file when + * taking an interrupt, !in_interrupt() should be a reasonable default. 
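+ *
+ * The usual calling pattern is a guarded fast path with a scalar
+ * fallback; sketch only, the two crypto_walk_*() helpers are
+ * hypothetical driver code:
+ *
+ *	if (may_use_simd())
+ *		crypto_walk_simd(req);
+ *	else
+ *		crypto_walk_scalar(req);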
+ */ +static __must_check inline bool may_use_simd(void) +{ + return !in_interrupt(); +} diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h new file mode 100644 index 000000000..1dcfad962 --- /dev/null +++ b/include/asm-generic/sizes.h @@ -0,0 +1,2 @@ +/* This is a placeholder, to be removed over time */ +#include <linux/sizes.h> diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h new file mode 100644 index 000000000..1547a03ac --- /dev/null +++ b/include/asm-generic/spinlock.h @@ -0,0 +1,11 @@ +#ifndef __ASM_GENERIC_SPINLOCK_H +#define __ASM_GENERIC_SPINLOCK_H +/* + * You need to implement asm/spinlock.h for SMP support. The generic + * version does not handle SMP. + */ +#ifdef CONFIG_SMP +#error need an architecture specific asm/spinlock.h +#endif + +#endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h new file mode 100644 index 000000000..4b934e9ec --- /dev/null +++ b/include/asm-generic/statfs.h @@ -0,0 +1,7 @@ +#ifndef _GENERIC_STATFS_H +#define _GENERIC_STATFS_H + +#include <uapi/asm-generic/statfs.h> + +typedef __kernel_fsid_t fsid_t; +#endif diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h new file mode 100644 index 000000000..de5e02014 --- /dev/null +++ b/include/asm-generic/string.h @@ -0,0 +1,10 @@ +#ifndef __ASM_GENERIC_STRING_H +#define __ASM_GENERIC_STRING_H +/* + * The kernel provides all required functions in lib/string.c + * + * Architectures probably want to provide at least their own optimized + * memcpy and memset functions though. + */ + +#endif /* __ASM_GENERIC_STRING_H */ diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h new file mode 100644 index 000000000..052c4ac04 --- /dev/null +++ b/include/asm-generic/switch_to.h @@ -0,0 +1,30 @@ +/* Generic task switch macro wrapper, based on MN10300 definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_SWITCH_TO_H +#define __ASM_GENERIC_SWITCH_TO_H + +#include <linux/thread_info.h> + +/* + * Context switching is now performed out-of-line in switch_to.S + */ +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +#define switch_to(prev, next, last) \ + do { \ + ((last) = __switch_to((prev), (next))); \ + } while (0) + +#endif /* __ASM_GENERIC_SWITCH_TO_H */ diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h new file mode 100644 index 000000000..0c938a435 --- /dev/null +++ b/include/asm-generic/syscall.h @@ -0,0 +1,157 @@ +/* + * Access to user system call parameters and results + * + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * This file is a stub providing documentation for what functions + * asm-ARCH/syscall.h files need to define. Most arch definitions + * will be simple inlines. 
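+ *
+ * A typical caller is tracing or audit code acting on a task that is
+ * stopped at a syscall boundary, roughly (illustrative only):
+ *
+ *	struct pt_regs *regs = task_pt_regs(current);
+ *	unsigned long args[6];
+ *	int nr = syscall_get_nr(current, regs);
+ *
+ *	syscall_get_arguments(current, regs, 0, 6, args);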
+ * + * All of these functions expect to be called with no locks, + * and only when the caller is sure that the task of interest + * cannot return to user mode while we are looking at it. + */ + +#ifndef _ASM_SYSCALL_H +#define _ASM_SYSCALL_H 1 + +struct task_struct; +struct pt_regs; + +/** + * syscall_get_nr - find what system call a task is executing + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * If @task is executing a system call or is at system call + * tracing about to attempt one, returns the system call number. + * If @task is not executing a system call, i.e. it's blocked + * inside the kernel for a fault or signal, returns -1. + * + * Note this returns int even on 64-bit machines. Only 32 bits of + * system call number can be meaningful. If the actual arch value + * is 64 bits, this truncates to 32 bits so 0xffffffff means -1. + * + * It's only valid to call this when @task is known to be blocked. + */ +int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_rollback - roll back registers after an aborted system call + * @task: task of interest, must be in system call exit tracing + * @regs: task_pt_regs() of @task + * + * It's only valid to call this when @task is stopped for system + * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), + * after tracehook_report_syscall_entry() returned nonzero to prevent + * the system call from taking place. + * + * This rolls back the register state in @regs so it's as if the + * system call instruction was a no-op. The registers containing + * the system call number and arguments are as they were before the + * system call instruction. This may not be the same as what the + * register state looked like at system call entry tracing. + */ +void syscall_rollback(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_get_error - check result of traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns 0 if the system call succeeded, or -ERRORCODE if it failed. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +long syscall_get_error(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_get_return_value - get the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * + * Returns the return value of the successful system call. + * This value is meaningless if syscall_get_error() returned nonzero. + * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs); + +/** + * syscall_set_return_value - change the return value of a traced system call + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @error: negative error code, or zero to indicate success + * @val: user return value if @error is zero + * + * This changes the results of the system call that user mode will see. + * If @error is zero, the user sees a successful system call with a + * return value of @val. If @error is nonzero, it's a negated errno + * code; the user sees a failed system call with this errno code. 
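+ *
+ * For example, syscall_set_return_value(task, regs, -EPERM, 0) makes
+ * the traced call appear to fail with EPERM, while
+ * syscall_set_return_value(task, regs, 0, 3) makes it appear to
+ * succeed and return 3.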
+ * + * It's only valid to call this when @task is stopped for tracing on exit + * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + */ +void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val); + +/** + * syscall_get_arguments - extract system call parameter values + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @i: argument index [0,5] + * @n: number of arguments; n+i must be [1,6]. + * @args: array filled with argument values + * + * Fetches @n arguments to the system call starting with the @i'th argument + * (from 0 through 5). Argument @i is stored in @args[0], and so on. + * An arch inline version is probably optimal when @i and @n are constants. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + * It's invalid to call this with @i + @n > 6; we only support system calls + * taking up to 6 arguments. + */ +void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, unsigned long *args); + +/** + * syscall_set_arguments - change system call parameter value + * @task: task of interest, must be in system call entry tracing + * @regs: task_pt_regs() of @task + * @i: argument index [0,5] + * @n: number of arguments; n+i must be [1,6]. + * @args: array of argument values to store + * + * Changes @n arguments to the system call starting with the @i'th argument. + * Argument @i gets value @args[0], and so on. + * An arch inline version is probably optimal when @i and @n are constants. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. + * It's invalid to call this with @i + @n > 6; we only support system calls + * taking up to 6 arguments. + */ +void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args); + +/** + * syscall_get_arch - return the AUDIT_ARCH for the current system call + * + * Returns the AUDIT_ARCH_* based on the system call convention in use. + * + * It's only valid to call this when current is stopped on entry to a system + * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP. + * + * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must + * provide an implementation of this. + */ +int syscall_get_arch(void); +#endif /* _ASM_SYSCALL_H */ diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h new file mode 100644 index 000000000..1f74be511 --- /dev/null +++ b/include/asm-generic/syscalls.h @@ -0,0 +1,28 @@ +#ifndef __ASM_GENERIC_SYSCALLS_H +#define __ASM_GENERIC_SYSCALLS_H + +#include <linux/compiler.h> +#include <linux/linkage.h> + +/* + * Calling conventions for these system calls can differ, so + * it's possible to override them. 
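+ *
+ * An architecture that needs a different prototype declares its own
+ * and defines the matching macro before this header is pulled in,
+ * e.g. (sketch):
+ *
+ *	asmlinkage long sys_rt_sigreturn(void);
+ *	#define sys_rt_sigreturn sys_rt_sigreturn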
+ */ + +#ifndef sys_mmap2 +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +#endif + +#ifndef sys_mmap +asmlinkage long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, off_t pgoff); +#endif + +#ifndef sys_rt_sigreturn +asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); +#endif + +#endif /* __ASM_GENERIC_SYSCALLS_H */ diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h new file mode 100644 index 000000000..0a769feb2 --- /dev/null +++ b/include/asm-generic/termios-base.h @@ -0,0 +1,77 @@ +/* termios.h: generic termios/termio user copying/translation + */ + +#ifndef _ASM_GENERIC_TERMIOS_BASE_H +#define _ASM_GENERIC_TERMIOS_BASE_H + +#include <asm/uaccess.h> + +#ifndef __ARCH_TERMIO_GETPUT + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + struct termio __user *termio) +{ + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; +} + +#ifndef user_termios_to_kernel_termios +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#endif + +#ifndef kernel_termios_to_user_termios +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#endif + +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __ARCH_TERMIO_GETPUT */ + +#endif /* _ASM_GENERIC_TERMIOS_BASE_H */ diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h new file mode 100644 index 000000000..4fa6fe0fc --- /dev/null +++ b/include/asm-generic/termios.h @@ -0,0 +1,107 @@ +#ifndef _ASM_GENERIC_TERMIOS_H +#define _ASM_GENERIC_TERMIOS_H + + +#include <asm/uaccess.h> +#include <uapi/asm-generic/termios.h> + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. 
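+ *
+ * Only the low 16 bits of each flag word exist in the legacy termio
+ * layout, so every field below is merged as
+ * (kernel_flags & 0xffff0000) | low_16_bits_from_user, preserving
+ * whatever high bits the ktermios already holds.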
+ */ +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + const struct termio __user *termio) +{ + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; +} + +#ifdef TCGETS2 +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios2 __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)); +} + +static inline int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)); +} + +static inline int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#else /* TCGETS2 */ +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#endif /* TCGETS2 */ + +#endif /* _ASM_GENERIC_TERMIOS_H */ diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h new file mode 100644 index 000000000..b2243cb8d --- /dev/null +++ b/include/asm-generic/timex.h @@ -0,0 +1,22 @@ +#ifndef __ASM_GENERIC_TIMEX_H +#define __ASM_GENERIC_TIMEX_H + +/* + * If you have a cycle counter, return the value here. + */ +typedef unsigned long cycles_t; +#ifndef get_cycles +static inline cycles_t get_cycles(void) +{ + return 0; +} +#endif + +/* + * Architectures are encouraged to implement read_current_timer + * and define this in order to avoid the expensive delay loop + * calibration during boot. + */ +#undef ARCH_HAS_READ_CURRENT_TIMER + +#endif /* __ASM_GENERIC_TIMEX_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h new file mode 100644 index 000000000..db284bff2 --- /dev/null +++ b/include/asm-generic/tlb.h @@ -0,0 +1,221 @@ +/* include/asm-generic/tlb.h + * + * Generic TLB shootdown code + * + * Copyright 2001 Red Hat, Inc. + * Based on code from mm/memory.c Copyright Linus Torvalds and others. 
+ * + * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_GENERIC__TLB_H +#define _ASM_GENERIC__TLB_H + +#include <linux/swap.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +/* + * Semi RCU freeing of the page directories. + * + * This is needed by some architectures to implement software pagetable walkers. + * + * gup_fast() and other software pagetable walkers do a lockless page-table + * walk and therefore needs some synchronization with the freeing of the page + * directories. The chosen means to accomplish that is by disabling IRQs over + * the walk. + * + * Architectures that use IPIs to flush TLBs will then automagically DTRT, + * since we unlink the page, flush TLBs, free the page. Since the disabling of + * IRQs delays the completion of the TLB flush we can never observe an already + * freed page. + * + * Architectures that do not have this (PPC) need to delay the freeing by some + * other means, this is that means. + * + * What we do is batch the freed directory pages (tables) and RCU free them. + * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling + * holds off grace periods. + * + * However, in order to batch these pages we need to allocate storage, this + * allocation is deep inside the MM code and can thus easily fail on memory + * pressure. To guarantee progress we fall back to single table freeing, see + * the implementation of tlb_remove_table_one(). + * + */ +struct mmu_table_batch { + struct rcu_head rcu; + unsigned int nr; + void *tables[0]; +}; + +#define MAX_TABLE_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) + +extern void tlb_table_flush(struct mmu_gather *tlb); +extern void tlb_remove_table(struct mmu_gather *tlb, void *table); + +#endif + +/* + * If we can't allocate a page to make a big batch of page pointers + * to work on, then just handle a few from the on-stack structure. + */ +#define MMU_GATHER_BUNDLE 8 + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct page *pages[0]; +}; + +#define MAX_GATHER_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) + +/* + * Limit the maximum number of mmu_gather batches to reduce a risk of soft + * lockups for non-preemptible kernels on huge machines when a lot of memory + * is zapped during unmapping. + * 10K pages freed at once should be safe even without a preemption point. + */ +#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) + +/* struct mmu_gather is an opaque type used by the mm code for passing around + * any data needed by arch specific code for tlb_remove_page. 
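+ *
+ * The mm core drives it roughly like this (sketch of the pattern,
+ * see mm/memory.c for the real thing):
+ *
+ *	struct mmu_gather tlb;
+ *
+ *	tlb_gather_mmu(&tlb, mm, start, end);
+ *	for each pte in [start, end):
+ *		tlb_remove_tlb_entry(&tlb, pte, addr);
+ *		tlb_remove_page(&tlb, page);
+ *	tlb_finish_mmu(&tlb, start, end);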
+ */ +struct mmu_gather { + struct mm_struct *mm; +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + struct mmu_table_batch *batch; +#endif + unsigned long start; + unsigned long end; + /* we are in the middle of an operation to clear + * a full mm and can make some optimizations */ + unsigned int fullmm : 1, + /* we have performed an operation which + * requires a complete flush of the tlb */ + need_flush_all : 1; + + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[MMU_GATHER_BUNDLE]; + unsigned int batch_count; +}; + +#define HAVE_GENERIC_MMU_GATHER + +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); +void tlb_flush_mmu(struct mmu_gather *tlb); +void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, + unsigned long end); +int __tlb_remove_page(struct mmu_gather *tlb, struct page *page); + +/* tlb_remove_page + * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when + * required. + */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + if (!__tlb_remove_page(tlb, page)) + tlb_flush_mmu(tlb); +} + +static inline void __tlb_adjust_range(struct mmu_gather *tlb, + unsigned long address) +{ + tlb->start = min(tlb->start, address); + tlb->end = max(tlb->end, address + PAGE_SIZE); +} + +static inline void __tlb_reset_range(struct mmu_gather *tlb) +{ + if (tlb->fullmm) { + tlb->start = tlb->end = ~0; + } else { + tlb->start = TASK_SIZE; + tlb->end = 0; + } +} + +/* + * In the case of tlb vma handling, we can optimise these away in the + * case where we're doing a full MM flush. When we're doing a munmap, + * the vmas are adjusted to only cover the region to be torn down. + */ +#ifndef tlb_start_vma +#define tlb_start_vma(tlb, vma) do { } while (0) +#endif + +#define __tlb_end_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm && tlb->end) { \ + tlb_flush(tlb); \ + __tlb_reset_range(tlb); \ + } \ + } while (0) + +#ifndef tlb_end_vma +#define tlb_end_vma __tlb_end_vma +#endif + +#ifndef __tlb_remove_tlb_entry +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +#endif + +/** + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. + * + * Record the fact that pte's were really unmapped by updating the range, + * so we can later optimise away the tlb invalidate. This helps when + * userspace is unmapping already-unmapped pages, which happens quite a lot. + */ +#define tlb_remove_tlb_entry(tlb, ptep, address) \ + do { \ + __tlb_adjust_range(tlb, address); \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +/** + * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation + * This is a nop so far, because only x86 needs it. 
+ */ +#ifndef __tlb_remove_pmd_tlb_entry +#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0) +#endif + +#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address); \ + __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ + } while (0) + +#define pte_free_tlb(tlb, ptep, address) \ + do { \ + __tlb_adjust_range(tlb, address); \ + __pte_free_tlb(tlb, ptep, address); \ + } while (0) + +#ifndef __ARCH_HAS_4LEVEL_HACK +#define pud_free_tlb(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address); \ + __pud_free_tlb(tlb, pudp, address); \ + } while (0) +#endif + +#define pmd_free_tlb(tlb, pmdp, address) \ + do { \ + __tlb_adjust_range(tlb, address); \ + __pmd_free_tlb(tlb, pmdp, address); \ + } while (0) + +#define tlb_migrate_finish(mm) do {} while (0) + +#endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h new file mode 100644 index 000000000..d6d0a8843 --- /dev/null +++ b/include/asm-generic/tlbflush.h @@ -0,0 +1,20 @@ +#ifndef __ASM_GENERIC_TLBFLUSH_H +#define __ASM_GENERIC_TLBFLUSH_H +/* + * This is a dummy tlbflush implementation that can be used on all + * nommu architectures. + * If you have an MMU, you need to write your own functions. + */ +#ifdef CONFIG_MMU +#error need to implement an architecture specific asm/tlbflush.h +#endif + +#include <linux/bug.h> + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + BUG(); +} + + +#endif /* __ASM_GENERIC_TLBFLUSH_H */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h new file mode 100644 index 000000000..fc824e282 --- /dev/null +++ b/include/asm-generic/topology.h @@ -0,0 +1,76 @@ +/* + * linux/include/asm-generic/topology.h + * + * Written by: Matthew Dobson, IBM Corporation + * + * Copyright (C) 2002, IBM Corp. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Send feedback to <colpatch@us.ibm.com> + */ +#ifndef _ASM_GENERIC_TOPOLOGY_H +#define _ASM_GENERIC_TOPOLOGY_H + +#ifndef CONFIG_NUMA + +/* Other architectures wishing to use this simple topology API should fill + in the below functions as appropriate in their own <asm/topology.h> file. */ +#ifndef cpu_to_node +#define cpu_to_node(cpu) ((void)(cpu),0) +#endif +#ifndef set_numa_node +#define set_numa_node(node) +#endif +#ifndef set_cpu_numa_node +#define set_cpu_numa_node(cpu, node) +#endif +#ifndef cpu_to_mem +#define cpu_to_mem(cpu) ((void)(cpu),0) +#endif + +#ifndef parent_node +#define parent_node(node) ((void)(node),0) +#endif +#ifndef cpumask_of_node +#define cpumask_of_node(node) ((void)node, cpu_online_mask) +#endif +#ifndef pcibus_to_node +#define pcibus_to_node(bus) ((void)(bus), -1) +#endif + +#ifndef cpumask_of_pcibus +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? 
\ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) +#endif + +#endif /* CONFIG_NUMA */ + +#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) + +#ifndef set_numa_mem +#define set_numa_mem(node) +#endif +#ifndef set_cpu_numa_mem +#define set_cpu_numa_mem(cpu, node) +#endif + +#endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */ + +#endif /* _ASM_GENERIC_TOPOLOGY_H */ diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h new file mode 100644 index 000000000..6726f1baf --- /dev/null +++ b/include/asm-generic/trace_clock.h @@ -0,0 +1,16 @@ +#ifndef _ASM_GENERIC_TRACE_CLOCK_H +#define _ASM_GENERIC_TRACE_CLOCK_H +/* + * Arch-specific trace clocks. + */ + +/* + * Additional trace clocks added to the trace_clocks + * array in kernel/trace/trace.c + * None if the architecture has not defined it. + */ +#ifndef ARCH_TRACE_CLOCKS +# define ARCH_TRACE_CLOCKS +#endif + +#endif /* _ASM_GENERIC_TRACE_CLOCK_H */ diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h new file mode 100644 index 000000000..67deb898f --- /dev/null +++ b/include/asm-generic/uaccess-unaligned.h @@ -0,0 +1,26 @@ +#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H +#define __ASM_GENERIC_UACCESS_UNALIGNED_H + +/* + * This macro should be used instead of __get_user() when accessing + * values at locations that are not known to be aligned. + */ +#define __get_user_unaligned(x, ptr) \ +({ \ + __typeof__ (*(ptr)) __x; \ + __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ + (x) = __x; \ +}) + + +/* + * This macro should be used instead of __put_user() when accessing + * values at locations that are not known to be aligned. + */ +#define __put_user_unaligned(x, ptr) \ +({ \ + __typeof__ (*(ptr)) __x = (x); \ + __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ +}) + +#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h new file mode 100644 index 000000000..72d880383 --- /dev/null +++ b/include/asm-generic/uaccess.h @@ -0,0 +1,346 @@ +#ifndef __ASM_GENERIC_UACCESS_H +#define __ASM_GENERIC_UACCESS_H + +/* + * User space memory access functions, these should work + * on any machine that has kernel and user data in the same + * address space, e.g. all NOMMU machines. + */ +#include <linux/sched.h> +#include <linux/string.h> + +#include <asm/segment.h> + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +#ifndef KERNEL_DS +#define KERNEL_DS MAKE_MM_SEG(~0UL) +#endif + +#ifndef USER_DS +#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) +#endif + +#ifndef get_fs +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) + +static inline void set_fs(mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; +} +#endif + +#ifndef segment_eq +#define segment_eq(a, b) ((a).seg == (b).seg) +#endif + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) + +/* + * The architecture should really override this if possible, at least + * doing a check on the get_fs() + */ +#ifndef __access_ok +static inline int __access_ok(unsigned long addr, unsigned long size) +{ + return 1; +} +#endif + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. 
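+ * (For instance, an arch fault handler typically calls
+ * search_exception_table(instruction_pointer(regs)) and, if a fixup
+ * address comes back, points the saved instruction pointer at it
+ * before returning; the exact register accessors are arch specific.)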
No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +/* Returns 0 if exception not found and fixup otherwise. */ +extern unsigned long search_exception_table(unsigned long); + +/* + * architectures with an MMU should override these two + */ +#ifndef __copy_from_user +static inline __must_check long __copy_from_user(void *to, + const void __user * from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + switch(n) { + case 1: + *(u8 *)to = *(u8 __force *)from; + return 0; + case 2: + *(u16 *)to = *(u16 __force *)from; + return 0; + case 4: + *(u32 *)to = *(u32 __force *)from; + return 0; +#ifdef CONFIG_64BIT + case 8: + *(u64 *)to = *(u64 __force *)from; + return 0; +#endif + default: + break; + } + } + + memcpy(to, (const void __force *)from, n); + return 0; +} +#endif + +#ifndef __copy_to_user +static inline __must_check long __copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + switch(n) { + case 1: + *(u8 __force *)to = *(u8 *)from; + return 0; + case 2: + *(u16 __force *)to = *(u16 *)from; + return 0; + case 4: + *(u32 __force *)to = *(u32 *)from; + return 0; +#ifdef CONFIG_64BIT + case 8: + *(u64 __force *)to = *(u64 *)from; + return 0; +#endif + default: + break; + } + } + + memcpy((void __force *)to, from, n); + return 0; +} +#endif + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * This version just falls back to copy_{from,to}_user, which should + * provide a fast-path for small values. + */ +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __x = (x); \ + int __pu_err = -EFAULT; \ + __chk_user_ptr(ptr); \ + switch (sizeof (*(ptr))) { \ + case 1: \ + case 2: \ + case 4: \ + case 8: \ + __pu_err = __put_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + break; \ + default: \ + __put_user_bad(); \ + break; \ + } \ + __pu_err; \ +}) + +#define put_user(x, ptr) \ +({ \ + might_fault(); \ + access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \ + __put_user(x, ptr) : \ + -EFAULT; \ +}) + +#ifndef __put_user_fn + +static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +{ + size = __copy_to_user(ptr, x, size); + return size ? 
-EFAULT : size; +} + +#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) + +#endif + +extern int __put_user_bad(void) __attribute__((noreturn)); + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = -EFAULT; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ + unsigned char __x; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 2: { \ + unsigned short __x; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 4: { \ + unsigned int __x; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 8: { \ + unsigned long long __x; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + default: \ + __get_user_bad(); \ + break; \ + } \ + __gu_err; \ +}) + +#define get_user(x, ptr) \ +({ \ + might_fault(); \ + access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \ + __get_user(x, ptr) : \ + -EFAULT; \ +}) + +#ifndef __get_user_fn +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + size = __copy_from_user(x, ptr, size); + return size ? -EFAULT : size; +} + +#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) + +#endif + +extern int __get_user_bad(void) __attribute__((noreturn)); + +#ifndef __copy_from_user_inatomic +#define __copy_from_user_inatomic __copy_from_user +#endif + +#ifndef __copy_to_user_inatomic +#define __copy_to_user_inatomic __copy_to_user +#endif + +static inline long copy_from_user(void *to, + const void __user * from, unsigned long n) +{ + might_fault(); + if (access_ok(VERIFY_READ, from, n)) + return __copy_from_user(to, from, n); + else + return n; +} + +static inline long copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + might_fault(); + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_to_user(to, from, n); + else + return n; +} + +/* + * Copy a null terminated string from userspace. + */ +#ifndef __strncpy_from_user +static inline long +__strncpy_from_user(char *dst, const char __user *src, long count) +{ + char *tmp; + strncpy(dst, (const char __force *)src, count); + for (tmp = dst; *tmp && count > 0; tmp++, count--) + ; + return (tmp - dst); +} +#endif + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return -EFAULT; + return __strncpy_from_user(dst, src, count); +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +#ifndef __strnlen_user +#define __strnlen_user(s, n) (strnlen((s), (n)) + 1) +#endif + +/* + * Unlike strnlen, strnlen_user includes the nul terminator in + * its returned count. Callers should check for a returned value + * greater than N as an indication the string is too long. 
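+ *
+ * Example with the default __strnlen_user() below: for "abc\0"
+ * readable at src, strnlen_user(src, 16) returns 4 (three characters
+ * plus the terminator); it returns 0 if src is not accessible, and a
+ * string of 16 or more characters yields a value greater than 16.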
+ */ +static inline long strnlen_user(const char __user *src, long n) +{ + if (!access_ok(VERIFY_READ, src, 1)) + return 0; + return __strnlen_user(src, n); +} + +static inline long strlen_user(const char __user *src) +{ + return strnlen_user(src, 32767); +} + +/* + * Zero Userspace + */ +#ifndef __clear_user +static inline __must_check unsigned long +__clear_user(void __user *to, unsigned long n) +{ + memset((void __force *)to, 0, n); + return 0; +} +#endif + +static inline __must_check unsigned long +clear_user(void __user *to, unsigned long n) +{ + might_fault(); + if (!access_ok(VERIFY_WRITE, to, n)) + return n; + + return __clear_user(to, n); +} + +#endif /* __ASM_GENERIC_UACCESS_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h new file mode 100644 index 000000000..1ac097279 --- /dev/null +++ b/include/asm-generic/unaligned.h @@ -0,0 +1,35 @@ +#ifndef __ASM_GENERIC_UNALIGNED_H +#define __ASM_GENERIC_UNALIGNED_H + +/* + * This is the most generic implementation of unaligned accesses + * and should work almost anywhere. + */ +#include <asm/byteorder.h> + +/* Set by the arch if it can handle unaligned accesses in hardware. */ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include <linux/unaligned/access_ok.h> +#endif + +#if defined(__LITTLE_ENDIAN) +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> +# endif +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#elif defined(__BIG_ENDIAN) +# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> +# endif +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# error need to define endianess +#endif + +#endif /* __ASM_GENERIC_UNALIGNED_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h new file mode 100644 index 000000000..cccc86ecf --- /dev/null +++ b/include/asm-generic/unistd.h @@ -0,0 +1,12 @@ +#include <uapi/asm-generic/unistd.h> +#include <linux/export.h> + +/* + * These are required system calls, we should + * invert the logic eventually and let them + * be selected by default. + */ +#if __BITS_PER_LONG == 32 +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_LLSEEK +#endif diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h new file mode 100644 index 000000000..35638c347 --- /dev/null +++ b/include/asm-generic/user.h @@ -0,0 +1,8 @@ +#ifndef __ASM_GENERIC_USER_H +#define __ASM_GENERIC_USER_H +/* + * This file may define a 'struct user' structure. However, it is only + * used for a.out files, which are not supported on new architectures. + */ + +#endif /* __ASM_GENERIC_USER_H */ diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h new file mode 100644 index 000000000..36c8ff520 --- /dev/null +++ b/include/asm-generic/vga.h @@ -0,0 +1,24 @@ +/* + * Access to VGA videoram + * + * (c) 1998 Martin Mares <mj@ucw.cz> + */ +#ifndef __ASM_GENERIC_VGA_H +#define __ASM_GENERIC_VGA_H + +/* + * On most architectures that support VGA, we can just + * recalculate addresses and then access the videoram + * directly without any black magic. + * + * Everyone else needs to ioremap the address and use + * proper I/O accesses. 
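+ *
+ * With the defaults below, the legacy text-mode buffer can be
+ * reached as (sketch):
+ *
+ *	u8 *vram = (u8 *)VGA_MAP_MEM(0xb8000, 0x8000);
+ *	vga_writeb('A', vram);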
+ */ +#ifndef VGA_MAP_MEM +#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) +#endif + +#define vga_readb(x) (*(x)) +#define vga_writeb(x, y) (*(y) = (x)) + +#endif /* _ASM_GENERIC_VGA_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h new file mode 100644 index 000000000..8bd374d3c --- /dev/null +++ b/include/asm-generic/vmlinux.lds.h @@ -0,0 +1,830 @@ +/* + * Helper macros to support writing architecture specific + * linker scripts. + * + * A minimal linker scripts has following content: + * [This is a sample, architectures may have special requiriements] + * + * OUTPUT_FORMAT(...) + * OUTPUT_ARCH(...) + * ENTRY(...) + * SECTIONS + * { + * . = START; + * __init_begin = .; + * HEAD_TEXT_SECTION + * INIT_TEXT_SECTION(PAGE_SIZE) + * INIT_DATA_SECTION(...) + * PERCPU_SECTION(CACHELINE_SIZE) + * __init_end = .; + * + * _stext = .; + * TEXT_SECTION = 0 + * _etext = .; + * + * _sdata = .; + * RO_DATA_SECTION(PAGE_SIZE) + * RW_DATA_SECTION(...) + * _edata = .; + * + * EXCEPTION_TABLE(...) + * NOTES + * + * BSS_SECTION(0, 0, 0) + * _end = .; + * + * STABS_DEBUG + * DWARF_DEBUG + * + * DISCARDS // must be the last + * } + * + * [__init_begin, __init_end] is the init section that may be freed after init + * // __init_begin and __init_end should be page aligned, so that we can + * // free the whole .init memory + * [_stext, _etext] is the text section + * [_sdata, _edata] is the data section + * + * Some of the included output section have their own set of constants. + * Examples are: [__initramfs_start, __initramfs_end] for initramfs and + * [__nosave_begin, __nosave_end] for the nosave data + */ + +#ifndef LOAD_OFFSET +#define LOAD_OFFSET 0 +#endif + +#include <linux/export.h> + +/* Align . to a 8 byte boundary equals to maximum function alignment. */ +#define ALIGN_FUNCTION() . = ALIGN(8) + +/* + * Align to a 32 byte boundary equal to the + * alignment gcc 4.5 uses for a struct + */ +#define STRUCT_ALIGNMENT 32 +#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) + +/* The actual configuration determine if the init/exit sections + * are handled as text/data or they can be discarded (which + * often happens at runtime) + */ +#ifdef CONFIG_HOTPLUG_CPU +#define CPU_KEEP(sec) *(.cpu##sec) +#define CPU_DISCARD(sec) +#else +#define CPU_KEEP(sec) +#define CPU_DISCARD(sec) *(.cpu##sec) +#endif + +#if defined(CONFIG_MEMORY_HOTPLUG) +#define MEM_KEEP(sec) *(.mem##sec) +#define MEM_DISCARD(sec) +#else +#define MEM_KEEP(sec) +#define MEM_DISCARD(sec) *(.mem##sec) +#endif + +#ifdef CONFIG_FTRACE_MCOUNT_RECORD +#define MCOUNT_REC() . = ALIGN(8); \ + VMLINUX_SYMBOL(__start_mcount_loc) = .; \ + *(__mcount_loc) \ + VMLINUX_SYMBOL(__stop_mcount_loc) = .; +#else +#define MCOUNT_REC() +#endif + +#ifdef CONFIG_TRACE_BRANCH_PROFILING +#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \ + *(_ftrace_annotated_branch) \ + VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .; +#else +#define LIKELY_PROFILE() +#endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \ + *(_ftrace_branch) \ + VMLINUX_SYMBOL(__stop_branch_profile) = .; +#else +#define BRANCH_PROFILE() +#endif + +#ifdef CONFIG_KPROBES +#define KPROBE_BLACKLIST() . = ALIGN(8); \ + VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ + *(_kprobe_blacklist) \ + VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; +#else +#define KPROBE_BLACKLIST() +#endif + +#ifdef CONFIG_EVENT_TRACING +#define FTRACE_EVENTS() . 
= ALIGN(8); \ + VMLINUX_SYMBOL(__start_ftrace_events) = .; \ + *(_ftrace_events) \ + VMLINUX_SYMBOL(__stop_ftrace_events) = .; \ + VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \ + *(_ftrace_enum_map) \ + VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .; +#else +#define FTRACE_EVENTS() +#endif + +#ifdef CONFIG_TRACING +#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ + *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \ + VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; +#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \ + *(__tracepoint_str) /* Trace_printk fmt' pointer */ \ + VMLINUX_SYMBOL(__stop___tracepoint_str) = .; +#else +#define TRACE_PRINTKS() +#define TRACEPOINT_STR() +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define TRACE_SYSCALLS() . = ALIGN(8); \ + VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \ + *(__syscalls_metadata) \ + VMLINUX_SYMBOL(__stop_syscalls_metadata) = .; +#else +#define TRACE_SYSCALLS() +#endif + +#ifdef CONFIG_SERIAL_EARLYCON +#define EARLYCON_TABLE() STRUCT_ALIGN(); \ + VMLINUX_SYMBOL(__earlycon_table) = .; \ + *(__earlycon_table) \ + *(__earlycon_table_end) +#else +#define EARLYCON_TABLE() +#endif + +#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) +#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) +#define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name) +#define _OF_TABLE_0(name) +#define _OF_TABLE_1(name) \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__##name##_of_table) = .; \ + *(__##name##_of_table) \ + *(__##name##_of_table_end) + +#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) +#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) +#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) +#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) +#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) +#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) +#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) +#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) + +#define KERNEL_DTB() \ + STRUCT_ALIGN(); \ + VMLINUX_SYMBOL(__dtb_start) = .; \ + *(.dtb.init.rodata) \ + VMLINUX_SYMBOL(__dtb_end) = .; + +/* .data section */ +#define DATA_DATA \ + *(.data) \ + *(.ref.data) \ + *(.data..shared_aligned) /* percpu related */ \ + MEM_KEEP(init.data) \ + MEM_KEEP(exit.data) \ + *(.data.unlikely) \ + STRUCT_ALIGN(); \ + *(__tracepoints) \ + /* implement dynamic printk debug */ \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___verbose) = .; \ + *(__verbose) \ + VMLINUX_SYMBOL(__stop___verbose) = .; \ + LIKELY_PROFILE() \ + BRANCH_PROFILE() \ + TRACE_PRINTKS() \ + TRACEPOINT_STR() + +/* + * Data section helpers + */ +#define NOSAVE_DATA \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__nosave_begin) = .; \ + *(.data..nosave) \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__nosave_end) = .; + +#define PAGE_ALIGNED_DATA(page_align) \ + . = ALIGN(page_align); \ + *(.data..page_aligned) + +#define READ_MOSTLY_DATA(align) \ + . = ALIGN(align); \ + *(.data..read_mostly) \ + . = ALIGN(align); + +#define CACHELINE_ALIGNED_DATA(align) \ + . = ALIGN(align); \ + *(.data..cacheline_aligned) + +#define INIT_TASK_DATA(align) \ + . = ALIGN(align); \ + *(.data..init_task) + +/* + * Read only Data + */ +#define RO_DATA_SECTION(align) \ + . 
= ALIGN((align)); \ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_rodata) = .; \ + *(.rodata) *(.rodata.*) \ + *(__vermagic) /* Kernel version magic */ \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ + *(__tracepoints_ptrs) /* Tracepoints: pointer array */\ + VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \ + *(__tracepoints_strings)/* Tracepoints: strings */ \ + } \ + \ + .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ + *(.rodata1) \ + } \ + \ + BUG_TABLE \ + \ + /* PCI quirks */ \ + .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ + *(.pci_fixup_early) \ + VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ + *(.pci_fixup_header) \ + VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ + *(.pci_fixup_final) \ + VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ + *(.pci_fixup_enable) \ + VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ + *(.pci_fixup_resume) \ + VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \ + *(.pci_fixup_resume_early) \ + VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \ + *(.pci_fixup_suspend) \ + VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \ + VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \ + *(.pci_fixup_suspend_late) \ + VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \ + } \ + \ + /* Built-in firmware blobs */ \ + .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_builtin_fw) = .; \ + *(.builtin_fw) \ + VMLINUX_SYMBOL(__end_builtin_fw) = .; \ + } \ + \ + TRACEDATA \ + \ + /* Kernel symbol table: Normal symbols */ \ + __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab) = .; \ + *(SORT(___ksymtab+*)) \ + VMLINUX_SYMBOL(__stop___ksymtab) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ + *(SORT(___ksymtab_gpl+*)) \ + VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ + *(SORT(___ksymtab_unused+*)) \ + VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ + *(SORT(___ksymtab_unused_gpl+*)) \ + VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ + *(SORT(___ksymtab_gpl_future+*)) \ + VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ + } \ + \ + /* Kernel symbol table: Normal symbols */ \ + __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab) = .; \ + *(SORT(___kcrctab+*)) \ + VMLINUX_SYMBOL(__stop___kcrctab) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only symbols */ \ + __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ + *(SORT(___kcrctab_gpl+*)) \ + 
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ + } \ + \ + /* Kernel symbol table: Normal unused symbols */ \ + __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ + *(SORT(___kcrctab_unused+*)) \ + VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ + } \ + \ + /* Kernel symbol table: GPL-only unused symbols */ \ + __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ + *(SORT(___kcrctab_unused_gpl+*)) \ + VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ + } \ + \ + /* Kernel symbol table: GPL-future-only symbols */ \ + __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ + *(SORT(___kcrctab_gpl_future+*)) \ + VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ + } \ + \ + /* Kernel symbol table: strings */ \ + __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ + *(__ksymtab_strings) \ + } \ + \ + /* __*init sections */ \ + __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ + *(.ref.rodata) \ + MEM_KEEP(init.rodata) \ + MEM_KEEP(exit.rodata) \ + } \ + \ + /* Built-in module parameters. */ \ + __param : AT(ADDR(__param) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___param) = .; \ + *(__param) \ + VMLINUX_SYMBOL(__stop___param) = .; \ + } \ + \ + /* Built-in module versions. */ \ + __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___modver) = .; \ + *(__modver) \ + VMLINUX_SYMBOL(__stop___modver) = .; \ + . = ALIGN((align)); \ + VMLINUX_SYMBOL(__end_rodata) = .; \ + } \ + . = ALIGN((align)); + +/* RODATA & RO_DATA provided for backward compatibility. + * All archs are supposed to use RO_DATA() */ +#define RODATA RO_DATA_SECTION(4096) +#define RO_DATA(align) RO_DATA_SECTION(align) + +#define SECURITY_INIT \ + .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__security_initcall_start) = .; \ + *(.security_initcall.init) \ + VMLINUX_SYMBOL(__security_initcall_end) = .; \ + } + +/* .text section. 
Map to function alignment to avoid address changes + * during second ld run in second ld pass when generating System.map */ +#define TEXT_TEXT \ + ALIGN_FUNCTION(); \ + *(.text.hot) \ + *(.text .text.fixup) \ + *(.ref.text) \ + MEM_KEEP(init.text) \ + MEM_KEEP(exit.text) \ + *(.text.unlikely) + + +/* sched.text is aling to function alignment to secure we have same + * address even at second ld pass when generating System.map */ +#define SCHED_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__sched_text_start) = .; \ + *(.sched.text) \ + VMLINUX_SYMBOL(__sched_text_end) = .; + +/* spinlock.text is aling to function alignment to secure we have same + * address even at second ld pass when generating System.map */ +#define LOCK_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__lock_text_start) = .; \ + *(.spinlock.text) \ + VMLINUX_SYMBOL(__lock_text_end) = .; + +#define KPROBES_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__kprobes_text_start) = .; \ + *(.kprobes.text) \ + VMLINUX_SYMBOL(__kprobes_text_end) = .; + +#define ENTRY_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__entry_text_start) = .; \ + *(.entry.text) \ + VMLINUX_SYMBOL(__entry_text_end) = .; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#define IRQENTRY_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__irqentry_text_start) = .; \ + *(.irqentry.text) \ + VMLINUX_SYMBOL(__irqentry_text_end) = .; +#else +#define IRQENTRY_TEXT +#endif + +/* Section used for early init (in .S files) */ +#define HEAD_TEXT *(.head.text) + +#define HEAD_TEXT_SECTION \ + .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ + HEAD_TEXT \ + } + +/* + * Exception table + */ +#define EXCEPTION_TABLE(align) \ + . = ALIGN(align); \ + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___ex_table) = .; \ + *(__ex_table) \ + VMLINUX_SYMBOL(__stop___ex_table) = .; \ + } + +/* + * Init task + */ +#define INIT_TASK_DATA_SECTION(align) \ + . = ALIGN(align); \ + .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ + INIT_TASK_DATA(align) \ + } + +#ifdef CONFIG_CONSTRUCTORS +#define KERNEL_CTORS() . = ALIGN(8); \ + VMLINUX_SYMBOL(__ctors_start) = .; \ + *(.ctors) \ + *(SORT(.init_array.*)) \ + *(.init_array) \ + VMLINUX_SYMBOL(__ctors_end) = .; +#else +#define KERNEL_CTORS() +#endif + +/* init and exit section handling */ +#define INIT_DATA \ + *(.init.data) \ + MEM_DISCARD(init.data) \ + KERNEL_CTORS() \ + MCOUNT_REC() \ + *(.init.rodata) \ + FTRACE_EVENTS() \ + TRACE_SYSCALLS() \ + KPROBE_BLACKLIST() \ + MEM_DISCARD(init.rodata) \ + CLK_OF_TABLES() \ + RESERVEDMEM_OF_TABLES() \ + CLKSRC_OF_TABLES() \ + IOMMU_OF_TABLES() \ + CPU_METHOD_OF_TABLES() \ + CPUIDLE_METHOD_OF_TABLES() \ + KERNEL_DTB() \ + IRQCHIP_OF_MATCH_TABLE() \ + EARLYCON_TABLE() \ + EARLYCON_OF_TABLES() + +#define INIT_TEXT \ + *(.init.text) \ + MEM_DISCARD(init.text) + +#define EXIT_DATA \ + *(.exit.data) \ + MEM_DISCARD(exit.data) \ + MEM_DISCARD(exit.rodata) + +#define EXIT_TEXT \ + *(.exit.text) \ + MEM_DISCARD(exit.text) + +#define EXIT_CALL \ + *(.exitcall.exit) + +/* + * bss (Block Started by Symbol) - uninitialized data + * zeroed during startup + */ +#define SBSS(sbss_align) \ + . = ALIGN(sbss_align); \ + .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ + *(.sbss) \ + *(.scommon) \ + } + +/* + * Allow archectures to redefine BSS_FIRST_SECTIONS to add extra + * sections to the front of bss. + */ +#ifndef BSS_FIRST_SECTIONS +#define BSS_FIRST_SECTIONS +#endif + +#define BSS(bss_align) \ + . 
= ALIGN(bss_align); \ + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ + BSS_FIRST_SECTIONS \ + *(.bss..page_aligned) \ + *(.dynbss) \ + *(.bss) \ + *(COMMON) \ + } + +/* + * DWARF debug sections. + * Symbols in the DWARF debugging sections are relative to + * the beginning of the section so we begin them at 0. + */ +#define DWARF_DEBUG \ + /* DWARF 1 */ \ + .debug 0 : { *(.debug) } \ + .line 0 : { *(.line) } \ + /* GNU DWARF 1 extensions */ \ + .debug_srcinfo 0 : { *(.debug_srcinfo) } \ + .debug_sfnames 0 : { *(.debug_sfnames) } \ + /* DWARF 1.1 and DWARF 2 */ \ + .debug_aranges 0 : { *(.debug_aranges) } \ + .debug_pubnames 0 : { *(.debug_pubnames) } \ + /* DWARF 2 */ \ + .debug_info 0 : { *(.debug_info \ + .gnu.linkonce.wi.*) } \ + .debug_abbrev 0 : { *(.debug_abbrev) } \ + .debug_line 0 : { *(.debug_line) } \ + .debug_frame 0 : { *(.debug_frame) } \ + .debug_str 0 : { *(.debug_str) } \ + .debug_loc 0 : { *(.debug_loc) } \ + .debug_macinfo 0 : { *(.debug_macinfo) } \ + /* SGI/MIPS DWARF 2 extensions */ \ + .debug_weaknames 0 : { *(.debug_weaknames) } \ + .debug_funcnames 0 : { *(.debug_funcnames) } \ + .debug_typenames 0 : { *(.debug_typenames) } \ + .debug_varnames 0 : { *(.debug_varnames) } \ + + /* Stabs debugging sections. */ +#define STABS_DEBUG \ + .stab 0 : { *(.stab) } \ + .stabstr 0 : { *(.stabstr) } \ + .stab.excl 0 : { *(.stab.excl) } \ + .stab.exclstr 0 : { *(.stab.exclstr) } \ + .stab.index 0 : { *(.stab.index) } \ + .stab.indexstr 0 : { *(.stab.indexstr) } \ + .comment 0 : { *(.comment) } + +#ifdef CONFIG_GENERIC_BUG +#define BUG_TABLE \ + . = ALIGN(8); \ + __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___bug_table) = .; \ + *(__bug_table) \ + VMLINUX_SYMBOL(__stop___bug_table) = .; \ + } +#else +#define BUG_TABLE +#endif + +#ifdef CONFIG_PM_TRACE +#define TRACEDATA \ + . = ALIGN(4); \ + .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__tracedata_start) = .; \ + *(.tracedata) \ + VMLINUX_SYMBOL(__tracedata_end) = .; \ + } +#else +#define TRACEDATA +#endif + +#define NOTES \ + .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_notes) = .; \ + *(.note.*) \ + VMLINUX_SYMBOL(__stop_notes) = .; \ + } + +#define INIT_SETUP(initsetup_align) \ + . = ALIGN(initsetup_align); \ + VMLINUX_SYMBOL(__setup_start) = .; \ + *(.init.setup) \ + VMLINUX_SYMBOL(__setup_end) = .; + +#define INIT_CALLS_LEVEL(level) \ + VMLINUX_SYMBOL(__initcall##level##_start) = .; \ + *(.initcall##level##.init) \ + *(.initcall##level##s.init) \ + +#define INIT_CALLS \ + VMLINUX_SYMBOL(__initcall_start) = .; \ + *(.initcallearly.init) \ + INIT_CALLS_LEVEL(0) \ + INIT_CALLS_LEVEL(1) \ + INIT_CALLS_LEVEL(2) \ + INIT_CALLS_LEVEL(3) \ + INIT_CALLS_LEVEL(4) \ + INIT_CALLS_LEVEL(5) \ + INIT_CALLS_LEVEL(rootfs) \ + INIT_CALLS_LEVEL(6) \ + INIT_CALLS_LEVEL(7) \ + VMLINUX_SYMBOL(__initcall_end) = .; + +#define CON_INITCALL \ + VMLINUX_SYMBOL(__con_initcall_start) = .; \ + *(.con_initcall.init) \ + VMLINUX_SYMBOL(__con_initcall_end) = .; + +#define SECURITY_INITCALL \ + VMLINUX_SYMBOL(__security_initcall_start) = .; \ + *(.security_initcall.init) \ + VMLINUX_SYMBOL(__security_initcall_end) = .; + +#ifdef CONFIG_BLK_DEV_INITRD +#define INIT_RAM_FS \ + . = ALIGN(4); \ + VMLINUX_SYMBOL(__initramfs_start) = .; \ + *(.init.ramfs) \ + . = ALIGN(8); \ + *(.init.ramfs.info) +#else +#define INIT_RAM_FS +#endif + +/* + * Default discarded sections. 
+ * + * Some archs want to discard exit text/data at runtime rather than + * link time due to cross-section references such as alt instructions, + * bug table, eh_frame, etc. DISCARDS must be the last of output + * section definitions so that such archs put those in earlier section + * definitions. + */ +#define DISCARDS \ + /DISCARD/ : { \ + EXIT_TEXT \ + EXIT_DATA \ + EXIT_CALL \ + *(.discard) \ + *(.discard.*) \ + } + +/** + * PERCPU_INPUT - the percpu input sections + * @cacheline: cacheline size + * + * The core percpu section names and core symbols which do not rely + * directly upon load addresses. + * + * @cacheline is used to align subsections to avoid false cacheline + * sharing between subsections for different purposes. + */ +#define PERCPU_INPUT(cacheline) \ + VMLINUX_SYMBOL(__per_cpu_start) = .; \ + *(.data..percpu..first) \ + . = ALIGN(PAGE_SIZE); \ + *(.data..percpu..page_aligned) \ + . = ALIGN(cacheline); \ + *(.data..percpu..read_mostly) \ + . = ALIGN(cacheline); \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ + VMLINUX_SYMBOL(__per_cpu_end) = .; + +/** + * PERCPU_VADDR - define output section for percpu area + * @cacheline: cacheline size + * @vaddr: explicit base address (optional) + * @phdr: destination PHDR (optional) + * + * Macro which expands to output section for percpu area. + * + * @cacheline is used to align subsections to avoid false cacheline + * sharing between subsections for different purposes. + * + * If @vaddr is not blank, it specifies explicit base address and all + * percpu symbols will be offset from the given address. If blank, + * @vaddr always equals @laddr + LOAD_OFFSET. + * + * @phdr defines the output PHDR to use if not blank. Be warned that + * output PHDR is sticky. If @phdr is specified, the next output + * section in the linker script will go there too. @phdr should have + * a leading colon. + * + * Note that this macros defines __per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU_SECTION. + */ +#define PERCPU_VADDR(cacheline, vaddr, phdr) \ + VMLINUX_SYMBOL(__per_cpu_load) = .; \ + .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ + - LOAD_OFFSET) { \ + PERCPU_INPUT(cacheline) \ + } phdr \ + . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); + +/** + * PERCPU_SECTION - define output section for percpu area, simple version + * @cacheline: cacheline size + * + * Align to PAGE_SIZE and outputs output section for percpu area. This + * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and + * __per_cpu_start will be identical. + * + * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,) + * except that __per_cpu_load is defined as a relative symbol against + * .data..percpu which is required for relocatable x86_32 configuration. + */ +#define PERCPU_SECTION(cacheline) \ + . = ALIGN(PAGE_SIZE); \ + .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__per_cpu_load) = .; \ + PERCPU_INPUT(cacheline) \ + } + + +/* + * Definition of the high level *_SECTION macros + * They will fit only a subset of the architectures + */ + + +/* + * Writeable data. + * All sections are combined in a single .data section. + * The sections following CONSTRUCTORS are arranged so their + * typical alignment matches. + * A cacheline is typical/always less than a PAGE_SIZE so + * the sections that has this restriction (or similar) + * is located before the ones requiring PAGE_SIZE alignment. 
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which + * matches the requirement of PAGE_ALIGNED_DATA. + * + * use 0 as page_align if page_aligned data is not used */ +#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ + . = ALIGN(PAGE_SIZE); \ + .data : AT(ADDR(.data) - LOAD_OFFSET) { \ + INIT_TASK_DATA(inittask) \ + NOSAVE_DATA \ + PAGE_ALIGNED_DATA(pagealigned) \ + CACHELINE_ALIGNED_DATA(cacheline) \ + READ_MOSTLY_DATA(cacheline) \ + DATA_DATA \ + CONSTRUCTORS \ + } + +#define INIT_TEXT_SECTION(inittext_align) \ + . = ALIGN(inittext_align); \ + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(_sinittext) = .; \ + INIT_TEXT \ + VMLINUX_SYMBOL(_einittext) = .; \ + } + +#define INIT_DATA_SECTION(initsetup_align) \ + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \ + INIT_DATA \ + INIT_SETUP(initsetup_align) \ + INIT_CALLS \ + CON_INITCALL \ + SECURITY_INITCALL \ + INIT_RAM_FS \ + } + +#define BSS_SECTION(sbss_align, bss_align, stop_align) \ + . = ALIGN(sbss_align); \ + VMLINUX_SYMBOL(__bss_start) = .; \ + SBSS(sbss_align) \ + BSS(bss_align) \ + . = ALIGN(stop_align); \ + VMLINUX_SYMBOL(__bss_stop) = .; diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h new file mode 100644 index 000000000..b1a49677f --- /dev/null +++ b/include/asm-generic/vtime.h @@ -0,0 +1 @@ +/* no content, but patch(1) dislikes empty files */ diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h new file mode 100644 index 000000000..94f9ea8ab --- /dev/null +++ b/include/asm-generic/word-at-a-time.h @@ -0,0 +1,56 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +/* + * This says "generic", but it's actually big-endian only. + * Little-endian can use more efficient versions of these + * interfaces, see for example + * arch/x86/include/asm/word-at-a-time.h + * for those. + */ + +#include <linux/kernel.h> + +struct word_at_a_time { + const unsigned long high_bits, low_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) } + +/* Bit set in the bytes that have a zero */ +static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c) +{ + unsigned long mask = (val & c->low_bits) + c->low_bits; + return ~(mask | rhs); +} + +#define create_zero_mask(mask) (mask) + +static inline long find_zero(unsigned long mask) +{ + long byte = 0; +#ifdef CONFIG_64BIT + if (mask >> 32) + mask >>= 32; + else + byte = 4; +#endif + if (mask >> 16) + mask >>= 16; + else + byte += 2; + return (mask >> 8) ? byte : byte + 1; +} + +static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +{ + unsigned long rhs = val | c->low_bits; + *data = rhs; + return (val + c->high_bits) & ~rhs; +} + +#ifndef zero_bytemask +#define zero_bytemask(mask) (~1ul << __fls(mask)) +#endif + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h new file mode 100644 index 000000000..b4d843225 --- /dev/null +++ b/include/asm-generic/xor.h @@ -0,0 +1,718 @@ +/* + * include/asm-generic/xor.h + * + * Generic optimized RAID-5 checksumming functions. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/prefetch.h> + +static void +xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0]; + p1[1] ^= p2[1]; + p1[2] ^= p2[2]; + p1[3] ^= p2[3]; + p1[4] ^= p2[4]; + p1[5] ^= p2[5]; + p1[6] ^= p2[6]; + p1[7] ^= p2[7]; + p1 += 8; + p2 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0]; + p1[1] ^= p2[1] ^ p3[1]; + p1[2] ^= p2[2] ^ p3[2]; + p1[3] ^= p2[3] ^ p3[3]; + p1[4] ^= p2[4] ^ p3[4]; + p1[5] ^= p2[5] ^ p3[5]; + p1[6] ^= p2[6] ^ p3[6]; + p1[7] ^= p2[7] ^ p3[7]; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); +} + +static void +xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8; + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + d0 ^= p5[0]; + d1 ^= p5[1]; + d2 ^= p5[2]; + d3 ^= p5[3]; + d4 ^= p5[4]; + d5 ^= p5[5]; + d6 ^= p5[6]; + d7 ^= p5[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); +} + +static void +xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + prefetchw(p1); + prefetch(p2); + + do { + prefetchw(p1+8); + prefetch(p2+8); + once_more: + p1[0] ^= p2[0]; + p1[1] ^= p2[1]; + p1[2] ^= p2[2]; + p1[3] ^= p2[3]; + p1[4] ^= p2[4]; + p1[5] ^= p2[5]; + p1[6] ^= p2[6]; + p1[7] ^= p2[7]; + p1 += 8; + p2 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + prefetchw(p1); + prefetch(p2); + prefetch(p3); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + once_more: + p1[0] ^= p2[0] ^ p3[0]; + p1[1] ^= p2[1] ^ p3[1]; + p1[2] ^= p2[2] ^ p3[2]; + p1[3] ^= p2[3] ^ p3[3]; + p1[4] ^= p2[4] ^ p3[4]; + p1[5] ^= p2[5] ^ p3[5]; + p1[6] ^= p2[6] ^ p3[6]; + p1[7] ^= p2[7] ^ p3[7]; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + once_more: + p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + prefetch(p5); + + do { + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + prefetch(p5+8); + once_more: + p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; + p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; + p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; + p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; + p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; + p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; + p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; + p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + long lines = bytes / (sizeof (long)) 
/ 8 - 1; + + prefetchw(p1); + prefetch(p2); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. */ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static void +xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + long lines = bytes / (sizeof (long)) / 8 - 1; + + prefetchw(p1); + prefetch(p2); + prefetch(p3); + prefetch(p4); + prefetch(p5); + + do { + register long d0, d1, d2, d3, d4, d5, d6, d7; + + prefetchw(p1+8); + prefetch(p2+8); + prefetch(p3+8); + prefetch(p4+8); + prefetch(p5+8); + once_more: + d0 = p1[0]; /* Pull the stuff into registers */ + d1 = p1[1]; /* ... in bursts, if possible. 
*/ + d2 = p1[2]; + d3 = p1[3]; + d4 = p1[4]; + d5 = p1[5]; + d6 = p1[6]; + d7 = p1[7]; + d0 ^= p2[0]; + d1 ^= p2[1]; + d2 ^= p2[2]; + d3 ^= p2[3]; + d4 ^= p2[4]; + d5 ^= p2[5]; + d6 ^= p2[6]; + d7 ^= p2[7]; + d0 ^= p3[0]; + d1 ^= p3[1]; + d2 ^= p3[2]; + d3 ^= p3[3]; + d4 ^= p3[4]; + d5 ^= p3[5]; + d6 ^= p3[6]; + d7 ^= p3[7]; + d0 ^= p4[0]; + d1 ^= p4[1]; + d2 ^= p4[2]; + d3 ^= p4[3]; + d4 ^= p4[4]; + d5 ^= p4[5]; + d6 ^= p4[6]; + d7 ^= p4[7]; + d0 ^= p5[0]; + d1 ^= p5[1]; + d2 ^= p5[2]; + d3 ^= p5[3]; + d4 ^= p5[4]; + d5 ^= p5[5]; + d6 ^= p5[6]; + d7 ^= p5[7]; + p1[0] = d0; /* Store the result (in bursts) */ + p1[1] = d1; + p1[2] = d2; + p1[3] = d3; + p1[4] = d4; + p1[5] = d5; + p1[6] = d6; + p1[7] = d7; + p1 += 8; + p2 += 8; + p3 += 8; + p4 += 8; + p5 += 8; + } while (--lines > 0); + if (lines == 0) + goto once_more; +} + +static struct xor_block_template xor_block_8regs = { + .name = "8regs", + .do_2 = xor_8regs_2, + .do_3 = xor_8regs_3, + .do_4 = xor_8regs_4, + .do_5 = xor_8regs_5, +}; + +static struct xor_block_template xor_block_32regs = { + .name = "32regs", + .do_2 = xor_32regs_2, + .do_3 = xor_32regs_3, + .do_4 = xor_32regs_4, + .do_5 = xor_32regs_5, +}; + +static struct xor_block_template xor_block_8regs_p __maybe_unused = { + .name = "8regs_prefetch", + .do_2 = xor_8regs_p_2, + .do_3 = xor_8regs_p_3, + .do_4 = xor_8regs_p_4, + .do_5 = xor_8regs_p_5, +}; + +static struct xor_block_template xor_block_32regs_p __maybe_unused = { + .name = "32regs_prefetch", + .do_2 = xor_32regs_p_2, + .do_3 = xor_32regs_p_3, + .do_4 = xor_32regs_p_4, + .do_5 = xor_32regs_p_5, +}; + +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + } while (0) |
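
For context on the INIT_CALLS / INIT_CALLS_LEVEL() machinery gathered by INIT_DATA_SECTION() earlier in vmlinux.lds.h: the per-level .initcallN.init input sections are populated by the initcall macros in include/linux/init.h, and init/main.c walks the pointers collected between __initcall_start and __initcall_end in level order at boot. A minimal sketch, not part of the diff above (my_driver_init is a made-up name):

#include <linux/init.h>

/* __init puts the function body into .init.text (collected by INIT_TEXT and
 * freed after boot); device_initcall() emits a pointer to it into the
 * ".initcall6.init" input section gathered by INIT_CALLS_LEVEL(6) above. */
static int __init my_driver_init(void)
{
	return 0;
}
device_initcall(my_driver_init);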
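Similarly, the PERCPU_SECTION()/PERCPU_INPUT() output section is filled by the per-CPU definition macros from include/linux/percpu-defs.h. A small hedged sketch of how a variable ends up in .data..percpu (example_counter and bump_example_counter are made-up names):

#include <linux/percpu.h>

/* DEFINE_PER_CPU() places the variable in the .data..percpu input section,
 * which PERCPU_INPUT() collects between __per_cpu_start and __per_cpu_end;
 * at runtime each CPU accesses its own copy via a per-CPU offset. */
static DEFINE_PER_CPU(unsigned long, example_counter);

static void bump_example_counter(void)
{
	this_cpu_inc(example_counter);	/* increments this CPU's copy */
}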
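The word-at-a-time.h helpers added above are meant to be chained the way lib/string.c and lib/strnlen_user.c chain them: has_zero() tests a whole word for a NUL byte, then prep_zero_mask(), create_zero_mask() and find_zero() locate it. A rough strlen()-style sketch, assuming s is long-aligned and that reading a few bytes past the terminator is safe (wordwise_strlen is a made-up name):

#include <linux/kernel.h>
#include <asm/word-at-a-time.h>

static size_t wordwise_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;
	unsigned long val, data;
	size_t len = 0;

	for (;;) {
		val = *p++;				/* whole-word load */
		if (has_zero(val, &data, &constants)) {
			data = prep_zero_mask(val, data, &constants);
			data = create_zero_mask(data);
			return len + find_zero(data);	/* byte index of the NUL */
		}
		len += sizeof(unsigned long);
	}
}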
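Finally, the xor_block_template table at the end of xor.h is not normally called directly: XOR_TRY_TEMPLATES is expanded inside the RAID XOR calibration code, which benchmarks each template via xor_speed() and keeps the fastest one for xor_blocks(). Purely to illustrate the interface (example_xor_three is a made-up helper, not kernel API):

#include <linux/raid/xor.h>

/* dst ^= a ^ b over 'bytes' bytes; 'bytes' must be a non-zero multiple of
 * 8 * sizeof(long) and the buffers long-aligned, matching the unrolled
 * loops in the templates above. */
static void example_xor_three(struct xor_block_template *tmpl,
			      unsigned long bytes, unsigned long *dst,
			      unsigned long *a, unsigned long *b)
{
	tmpl->do_3(bytes, dst, a, b);
}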