Diffstat (limited to 'arch/avr32/include/asm')
-rw-r--r-- arch/avr32/include/asm/Kbuild | 23
-rw-r--r-- arch/avr32/include/asm/addrspace.h | 43
-rw-r--r-- arch/avr32/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/avr32/include/asm/asm.h | 102
-rw-r--r-- arch/avr32/include/asm/atomic.h | 187
-rw-r--r-- arch/avr32/include/asm/barrier.h | 22
-rw-r--r-- arch/avr32/include/asm/bitops.h | 314
-rw-r--r-- arch/avr32/include/asm/bug.h | 78
-rw-r--r-- arch/avr32/include/asm/bugs.h | 15
-rw-r--r-- arch/avr32/include/asm/cache.h | 38
-rw-r--r-- arch/avr32/include/asm/cacheflush.h | 132
-rw-r--r-- arch/avr32/include/asm/checksum.h | 152
-rw-r--r-- arch/avr32/include/asm/cmpxchg.h | 117
-rw-r--r-- arch/avr32/include/asm/current.h | 15
-rw-r--r-- arch/avr32/include/asm/dma-mapping.h | 349
-rw-r--r-- arch/avr32/include/asm/dma.h | 8
-rw-r--r-- arch/avr32/include/asm/elf.h | 105
-rw-r--r-- arch/avr32/include/asm/fb.h | 21
-rw-r--r-- arch/avr32/include/asm/ftrace.h | 1
-rw-r--r-- arch/avr32/include/asm/gpio.h | 6
-rw-r--r-- arch/avr32/include/asm/hardirq.h | 6
-rw-r--r-- arch/avr32/include/asm/hw_irq.h | 9
-rw-r--r-- arch/avr32/include/asm/io.h | 327
-rw-r--r-- arch/avr32/include/asm/irq.h | 24
-rw-r--r-- arch/avr32/include/asm/irqflags.h | 61
-rw-r--r-- arch/avr32/include/asm/kdebug.h | 12
-rw-r--r-- arch/avr32/include/asm/kmap_types.h | 10
-rw-r--r-- arch/avr32/include/asm/kprobes.h | 49
-rw-r--r-- arch/avr32/include/asm/linkage.h | 7
-rw-r--r-- arch/avr32/include/asm/mmu.h | 10
-rw-r--r-- arch/avr32/include/asm/mmu_context.h | 148
-rw-r--r-- arch/avr32/include/asm/module.h | 26
-rw-r--r-- arch/avr32/include/asm/mutex.h | 9
-rw-r--r-- arch/avr32/include/asm/ocd.h | 543
-rw-r--r-- arch/avr32/include/asm/page.h | 104
-rw-r--r-- arch/avr32/include/asm/pci.h | 10
-rw-r--r-- arch/avr32/include/asm/pgalloc.h | 102
-rw-r--r-- arch/avr32/include/asm/pgtable-2level.h | 47
-rw-r--r-- arch/avr32/include/asm/pgtable.h | 347
-rw-r--r-- arch/avr32/include/asm/processor.h | 167
-rw-r--r-- arch/avr32/include/asm/ptrace.h | 45
-rw-r--r-- arch/avr32/include/asm/serial.h | 13
-rw-r--r-- arch/avr32/include/asm/setup.h | 144
-rw-r--r-- arch/avr32/include/asm/shmparam.h | 6
-rw-r--r-- arch/avr32/include/asm/signal.h | 31
-rw-r--r-- arch/avr32/include/asm/string.h | 17
-rw-r--r-- arch/avr32/include/asm/switch_to.h | 46
-rw-r--r-- arch/avr32/include/asm/syscalls.h | 21
-rw-r--r-- arch/avr32/include/asm/sysreg.h | 291
-rw-r--r-- arch/avr32/include/asm/termios.h | 23
-rw-r--r-- arch/avr32/include/asm/thread_info.h | 103
-rw-r--r-- arch/avr32/include/asm/timex.h | 39
-rw-r--r-- arch/avr32/include/asm/tlb.h | 32
-rw-r--r-- arch/avr32/include/asm/tlbflush.h | 32
-rw-r--r-- arch/avr32/include/asm/traps.h | 23
-rw-r--r-- arch/avr32/include/asm/types.h | 19
-rw-r--r-- arch/avr32/include/asm/uaccess.h | 324
-rw-r--r-- arch/avr32/include/asm/ucontext.h | 12
-rw-r--r-- arch/avr32/include/asm/unaligned.h | 21
-rw-r--r-- arch/avr32/include/asm/unistd.h | 44
-rw-r--r-- arch/avr32/include/asm/user.h | 65
61 files changed, 5098 insertions(+), 0 deletions(-)
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
new file mode 100644
index 000000000..528d70d47
--- /dev/null
+++ b/arch/avr32/include/asm/Kbuild
@@ -0,0 +1,23 @@
+
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += futex.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/avr32/include/asm/addrspace.h b/arch/avr32/include/asm/addrspace.h
new file mode 100644
index 000000000..366794858
--- /dev/null
+++ b/arch/avr32/include/asm/addrspace.h
@@ -0,0 +1,43 @@
+/*
+ * Definitions for the address spaces of the AVR32 CPUs. Heavily based on
+ * include/asm-sh/addrspace.h
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_ADDRSPACE_H
+#define __ASM_AVR32_ADDRSPACE_H
+
+#ifdef CONFIG_MMU
+
+/* Memory segments when segmentation is enabled */
+#define P0SEG 0x00000000
+#define P1SEG 0x80000000
+#define P2SEG 0xa0000000
+#define P3SEG 0xc0000000
+#define P4SEG 0xe0000000
+
+/* Returns the privileged segment base of a given address */
+#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
+
+/* Returns the physical address of a PnSEG (n=1,2) address */
+#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
+
+/*
+ * Map an address to a certain privileged segment
+ */
+#define P1SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
+ | P1SEG))
+#define P2SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
+ | P2SEG))
+#define P3SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
+ | P3SEG))
+#define P4SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
+ | P4SEG))
+
+#endif /* CONFIG_MMU */
+
+#endif /* __ASM_AVR32_ADDRSPACE_H */
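A minimal sketch (illustrative, not part of this patch; the addresses are hypothetical) of how the segment macros compose: PHYSADDR() strips the segment bits, and the PnSEGADDR() macros rebase the physical address into the chosen segment's view of the same memory.

	void *cached = (void *)0x90001000;        /* a P1 (cached) address */
	unsigned long phys = PHYSADDR(cached);    /* 0x10001000 */
	void *uncached = (void *)P2SEGADDR(phys); /* 0xb0001000, uncached view */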
diff --git a/arch/avr32/include/asm/asm-offsets.h b/arch/avr32/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/avr32/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/avr32/include/asm/asm.h b/arch/avr32/include/asm/asm.h
new file mode 100644
index 000000000..a2c64f404
--- /dev/null
+++ b/arch/avr32/include/asm/asm.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_ASM_H__
+#define __ASM_AVR32_ASM_H__
+
+#include <asm/sysreg.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define mask_interrupts ssrf SYSREG_GM_OFFSET
+#define mask_exceptions ssrf SYSREG_EM_OFFSET
+#define unmask_interrupts csrf SYSREG_GM_OFFSET
+#define unmask_exceptions csrf SYSREG_EM_OFFSET
+
+#ifdef CONFIG_FRAME_POINTER
+ .macro save_fp
+ st.w --sp, r7
+ .endm
+ .macro restore_fp
+ ld.w r7, sp++
+ .endm
+ .macro zero_fp
+ mov r7, 0
+ .endm
+#else
+ .macro save_fp
+ .endm
+ .macro restore_fp
+ .endm
+ .macro zero_fp
+ .endm
+#endif
+ .macro get_thread_info reg
+ mov \reg, sp
+ andl \reg, ~(THREAD_SIZE - 1) & 0xffff
+ .endm
+
+ /* Save and restore registers */
+ .macro save_min sr, tmp=lr
+ pushm lr
+ mfsr \tmp, \sr
+ zero_fp
+ st.w --sp, \tmp
+ .endm
+
+ .macro restore_min sr, tmp=lr
+ ld.w \tmp, sp++
+ mtsr \sr, \tmp
+ popm lr
+ .endm
+
+ .macro save_half sr, tmp=lr
+ save_fp
+ pushm r8-r9,r10,r11,r12,lr
+ zero_fp
+ mfsr \tmp, \sr
+ st.w --sp, \tmp
+ .endm
+
+ .macro restore_half sr, tmp=lr
+ ld.w \tmp, sp++
+ mtsr \sr, \tmp
+ popm r8-r9,r10,r11,r12,lr
+ restore_fp
+ .endm
+
+ .macro save_full_user sr, tmp=lr
+ stmts --sp, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,sp,lr
+ st.w --sp, lr
+ zero_fp
+ mfsr \tmp, \sr
+ st.w --sp, \tmp
+ .endm
+
+ .macro restore_full_user sr, tmp=lr
+ ld.w \tmp, sp++
+ mtsr \sr, \tmp
+ ld.w lr, sp++
+ ldmts sp++, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,sp,lr
+ .endm
+
+ /* uaccess macros */
+ .macro branch_if_kernel scratch, label
+ get_thread_info \scratch
+ ld.w \scratch, \scratch[TI_flags]
+ bld \scratch, TIF_USERSPACE
+ brcc \label
+ .endm
+
+ .macro ret_if_privileged scratch, addr, size, ret
+ sub \scratch, \size, 1
+ add \scratch, \addr
+ retcs \ret
+ retmi \ret
+ .endm
+
+#endif /* __ASM_AVR32_ASM_H__ */
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
new file mode 100644
index 000000000..2d07ce1c5
--- /dev/null
+++ b/arch/avr32/include/asm/atomic.h
@@ -0,0 +1,187 @@
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ *
+ * But use these as seldom as possible since they are slower than
+ * regular operations.
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_ATOMIC_H
+#define __ASM_AVR32_ATOMIC_H
+
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_set(v, i) (((v)->counter) = i)
+
+#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
+static inline int __atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ asm volatile( \
+ "/* atomic_" #op "_return */\n" \
+ "1: ssrf 5\n" \
+ " ld.w %0, %2\n" \
+ " " #asm_op " %0, %3\n" \
+ " stcond %1, %0\n" \
+ " brne 1b" \
+ : "=&r" (result), "=o" (v->counter) \
+ : "m" (v->counter), #asm_con (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
+ATOMIC_OP_RETURN(sub, sub, rKs21)
+ATOMIC_OP_RETURN(add, add, r)
+
+#undef ATOMIC_OP_RETURN
+
+/*
+ * This is probably why we prefer sub with its signed 21-bit immediate:
+ * it needs one register fewer than the add instruction, which can only
+ * add two register values.
+ *
+ * Both forms are 32-bit instructions; the 16-bit encodings only take a
+ * very small immediate (4 bits).
+ *
+ * sub, 32-bit, type IV: subtracts a 21-bit immediate from a register.
+ * add, 32-bit, type II: adds two register values together.
+ */
+#define IS_21BIT_CONST(i) \
+ (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))
+
+/*
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Returns the resulting value.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ if (IS_21BIT_CONST(i))
+ return __atomic_sub_return(-i, v);
+
+ return __atomic_add_return(i, v);
+}
+
+/*
+ * atomic_sub_return - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Returns the resulting value.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ if (IS_21BIT_CONST(i))
+ return __atomic_sub_return(i, v);
+
+ return __atomic_add_return(-i, v);
+}
+
+/*
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int tmp, old = atomic_read(v);
+
+ if (IS_21BIT_CONST(a)) {
+ asm volatile(
+ "/* __atomic_sub_unless */\n"
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
+ " breq 1f\n"
+ " sub %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b\n"
+ "1:"
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "rKs21"(-a), "rKs21"(u)
+ : "cc", "memory");
+ } else {
+ asm volatile(
+ "/* __atomic_add_unless */\n"
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
+ " breq 1f\n"
+ " add %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b\n"
+ "1:"
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "r"(a), "ir"(u)
+ : "cc", "memory");
+ }
+
+ return old;
+}
+
+#undef IS_21BIT_CONST
+
+/*
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
+ */
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+ int result;
+
+ asm volatile(
+ "/* atomic_sub_if_positive */\n"
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " sub %0, %3\n"
+ " brlt 1f\n"
+ " stcond %1, %0\n"
+ " brne 1b\n"
+ "1:"
+ : "=&r"(result), "=o"(v->counter)
+ : "m"(v->counter), "ir"(i)
+ : "cc", "memory");
+
+ return result;
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
+#define atomic_add(i, v) (void)atomic_add_return(i, v)
+#define atomic_dec(v) atomic_sub(1, (v))
+#define atomic_inc(v) atomic_add(1, (v))
+
+#define atomic_dec_return(v) atomic_sub_return(1, v)
+#define atomic_inc_return(v) atomic_add_return(1, v)
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+
+#endif /* __ASM_AVR32_ATOMIC_H */
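A usage sketch (illustrative; refcnt and the helpers are hypothetical) of the constant-folding path described in the comment above:

static atomic_t refcnt = ATOMIC_INIT(0);

static int get_ref(void)
{
	/* 1 is a compile-time constant within the signed 21-bit range,
	 * so this compiles to __atomic_sub_return(-1, &refcnt). */
	return atomic_add_return(1, &refcnt);
}

static int get_refs(int n)
{
	/* Non-constant amount: falls back to __atomic_add_return(). */
	return atomic_add_return(n, &refcnt);
}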
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
new file mode 100644
index 000000000..715100790
--- /dev/null
+++ b/arch/avr32/include/asm/barrier.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_BARRIER_H
+#define __ASM_AVR32_BARRIER_H
+
+/*
+ * Weirdest thing ever.. no full barrier, but it has a write barrier!
+ */
+#define wmb() asm volatile("sync 0" : : : "memory")
+
+#ifdef CONFIG_SMP
+# error "The AVR32 port does not support SMP"
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_AVR32_BARRIER_H */
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
new file mode 100644
index 000000000..910d5374c
--- /dev/null
+++ b/arch/avr32/include/asm/bitops.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_BITOPS_H
+#define __ASM_AVR32_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+/*
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile void * addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
+ unsigned long tmp;
+
+ if (__builtin_constant_p(nr)) {
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " sbr %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p)
+ : "m"(*p), "i"(nr)
+ : "cc");
+ } else {
+ unsigned long mask = 1UL << (nr % BITS_PER_LONG);
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " or %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p)
+ : "m"(*p), "r"(mask)
+ : "cc");
+ }
+}
+
+/*
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile void * addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
+ unsigned long tmp;
+
+ if (__builtin_constant_p(nr)) {
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " cbr %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p)
+ : "m"(*p), "i"(nr)
+ : "cc");
+ } else {
+ unsigned long mask = 1UL << (nr % BITS_PER_LONG);
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " andn %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p)
+ : "m"(*p), "r"(mask)
+ : "cc");
+ }
+}
+
+/*
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile void * addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
+ unsigned long mask = 1UL << (nr % BITS_PER_LONG);
+ unsigned long tmp;
+
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " eor %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p)
+ : "m"(*p), "r"(mask)
+ : "cc");
+}
+
+/*
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
+ unsigned long mask = 1UL << (nr % BITS_PER_LONG);
+ unsigned long tmp, old;
+
+ if (__builtin_constant_p(nr)) {
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %3\n"
+ " mov %2, %0\n"
+ " sbr %0, %4\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p), "=&r"(old)
+ : "m"(*p), "i"(nr)
+ : "memory", "cc");
+ } else {
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %2, %3\n"
+ " or %0, %2, %4\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p), "=&r"(old)
+ : "m"(*p), "r"(mask)
+ : "memory", "cc");
+ }
+
+ return (old & mask) != 0;
+}
+
+/*
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
+ unsigned long mask = 1UL << (nr % BITS_PER_LONG);
+ unsigned long tmp, old;
+
+ if (__builtin_constant_p(nr)) {
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %3\n"
+ " mov %2, %0\n"
+ " cbr %0, %4\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p), "=&r"(old)
+ : "m"(*p), "i"(nr)
+ : "memory", "cc");
+ } else {
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %0, %3\n"
+ " mov %2, %0\n"
+ " andn %0, %4\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p), "=&r"(old)
+ : "m"(*p), "r"(mask)
+ : "memory", "cc");
+ }
+
+ return (old & mask) != 0;
+}
+
+/*
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
+ unsigned long mask = 1UL << (nr % BITS_PER_LONG);
+ unsigned long tmp, old;
+
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %2, %3\n"
+ " eor %0, %2, %4\n"
+ " stcond %1, %0\n"
+ " brne 1b"
+ : "=&r"(tmp), "=o"(*p), "=&r"(old)
+ : "m"(*p), "r"(mask)
+ : "memory", "cc");
+
+ return (old & mask) != 0;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+/* Find First bit Set */
+static inline unsigned long __ffs(unsigned long word)
+{
+ unsigned long result;
+
+ asm("brev %1\n\t"
+ "clz %0,%1"
+ : "=r"(result), "=&r"(word)
+ : "1"(word));
+ return result;
+}
+
+/* Find First Zero */
+static inline unsigned long ffz(unsigned long word)
+{
+ return __ffs(~word);
+}
+
+/* Find Last bit Set */
+static inline int fls(unsigned long word)
+{
+ unsigned long result;
+
+ asm("clz %0,%1" : "=r"(result) : "r"(word));
+ return 32 - result;
+}
+
+static inline int __fls(unsigned long word)
+{
+ return fls(word) - 1;
+}
+
+unsigned long find_first_zero_bit(const unsigned long *addr,
+ unsigned long size);
+#define find_first_zero_bit find_first_zero_bit
+
+unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_next_zero_bit find_next_zero_bit
+
+unsigned long find_first_bit(const unsigned long *addr,
+ unsigned long size);
+#define find_first_bit find_first_bit
+
+unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_next_bit find_next_bit
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ *
+ * The difference is that bit numbering starts at 1, and if no bit is set,
+ * the function returns 0.
+ */
+static inline int ffs(unsigned long word)
+{
+ if (word == 0)
+ return 0;
+ return __ffs(word) + 1;
+}
+
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+extern unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset);
+#define find_next_zero_bit_le find_next_zero_bit_le
+
+extern unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset);
+#define find_next_bit_le find_next_bit_le
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_AVR32_BITOPS_H */
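Worked examples (illustrative only) of the bit-search conventions above, which differ by one between the zero-based routines and the libc-style ones:

	__ffs(0x10);	/* == 4: zero-based index of the lowest set bit */
	ffs(0x10);	/* == 5: one-based, libc semantics */
	ffs(0);		/* == 0: no bit set */
	fls(0x10);	/* == 5: one-based index of the highest set bit */
	ffz(0x0f);	/* == 4: zero-based index of the lowest clear bit */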
diff --git a/arch/avr32/include/asm/bug.h b/arch/avr32/include/asm/bug.h
new file mode 100644
index 000000000..85a92d099
--- /dev/null
+++ b/arch/avr32/include/asm/bug.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_BUG_H
+#define __ASM_AVR32_BUG_H
+
+#ifdef CONFIG_BUG
+
+/*
+ * According to our Chief Architect, this compact opcode is very
+ * unlikely to ever be implemented.
+ */
+#define AVR32_BUG_OPCODE 0x5df0
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+ " .section __bug_table,\"a\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .long %1\n" \
+ " .short %2\n" \
+ " .short %3\n" \
+ " .org 2b + %4\n" \
+ " .previous" \
+ : \
+ : "i"(AVR32_BUG_OPCODE), "i"(__FILE__), \
+ "i"(__LINE__), "i"(flags), \
+ "i"(sizeof(struct bug_entry)))
+
+#else
+
+#define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+ " .section __bug_table,\"a\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .short %1\n" \
+ " .org 2b + %2\n" \
+ " .previous" \
+ : \
+ : "i"(AVR32_BUG_OPCODE), "i"(flags), \
+ "i"(sizeof(struct bug_entry)))
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG() \
+ do { \
+ _BUG_OR_WARN(0); \
+ unreachable(); \
+ } while (0)
+
+#define WARN_ON(condition) \
+ ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ _BUG_OR_WARN(BUGFLAG_WARNING); \
+ unlikely(__ret_warn_on); \
+ })
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
+
+#endif /* CONFIG_BUG */
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+void die(const char *str, struct pt_regs *regs, long err);
+void _exception(long signr, struct pt_regs *regs, int code,
+ unsigned long addr);
+
+#endif /* __ASM_AVR32_BUG_H */
diff --git a/arch/avr32/include/asm/bugs.h b/arch/avr32/include/asm/bugs.h
new file mode 100644
index 000000000..278661bbd
--- /dev/null
+++ b/arch/avr32/include/asm/bugs.h
@@ -0,0 +1,15 @@
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+#ifndef __ASM_AVR32_BUGS_H
+#define __ASM_AVR32_BUGS_H
+
+static void __init check_bugs(void)
+{
+ boot_cpu_data.loops_per_jiffy = loops_per_jiffy;
+}
+
+#endif /* __ASM_AVR32_BUGS_H */
diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
new file mode 100644
index 000000000..c3a58a189
--- /dev/null
+++ b/arch/avr32/include/asm/cache.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_AVR32_CACHE_H
+#define __ASM_AVR32_CACHE_H
+
+#define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#ifndef __ASSEMBLER__
+struct cache_info {
+ unsigned int ways;
+ unsigned int sets;
+ unsigned int linesz;
+};
+#endif /* __ASSEMBLER__ */
+
+/* Cache operation constants */
+#define ICACHE_FLUSH 0x00
+#define ICACHE_INVALIDATE 0x01
+#define ICACHE_LOCK 0x02
+#define ICACHE_UNLOCK 0x03
+#define ICACHE_PREFETCH 0x04
+
+#define DCACHE_FLUSH 0x08
+#define DCACHE_LOCK 0x09
+#define DCACHE_UNLOCK 0x0a
+#define DCACHE_INVALIDATE 0x0b
+#define DCACHE_CLEAN 0x0c
+#define DCACHE_CLEAN_INVAL 0x0d
+
+#endif /* __ASM_AVR32_CACHE_H */
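A sketch (struct and field names hypothetical) of what the ARCH_DMA_MINALIGN comment above implies for DMA buffers embedded in other structures:

struct uart_priv {
	spinlock_t lock;
	/* Starts on its own cache line, and 64 bytes is a multiple of
	 * L1_CACHE_BYTES, so accesses to neighbouring fields can never
	 * drag parts of an in-flight transfer into the data cache. */
	u8 rx_buf[64] ____cacheline_aligned;
};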
diff --git a/arch/avr32/include/asm/cacheflush.h b/arch/avr32/include/asm/cacheflush.h
new file mode 100644
index 000000000..96e53820b
--- /dev/null
+++ b/arch/avr32/include/asm/cacheflush.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_CACHEFLUSH_H
+#define __ASM_AVR32_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+#define CACHE_OP_ICACHE_INVALIDATE 0x01
+#define CACHE_OP_DCACHE_INVALIDATE 0x0b
+#define CACHE_OP_DCACHE_CLEAN 0x0c
+#define CACHE_OP_DCACHE_CLEAN_INVAL 0x0d
+
+/*
+ * Invalidate any cacheline containing virtual address vaddr without
+ * writing anything back to memory.
+ *
+ * Note that this function may corrupt unrelated data structures when
+ * applied on buffers that are not cacheline aligned in both ends.
+ */
+static inline void invalidate_dcache_line(void *vaddr)
+{
+ asm volatile("cache %0[0], %1"
+ :
+ : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
+ : "memory");
+}
+
+/*
+ * Make sure any cacheline containing virtual address vaddr is written
+ * to memory.
+ */
+static inline void clean_dcache_line(void *vaddr)
+{
+ asm volatile("cache %0[0], %1"
+ :
+ : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
+ : "memory");
+}
+
+/*
+ * Make sure any cacheline containing virtual address vaddr is written
+ * to memory and then invalidate it.
+ */
+static inline void flush_dcache_line(void *vaddr)
+{
+ asm volatile("cache %0[0], %1"
+ :
+ : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
+ : "memory");
+}
+
+/*
+ * Invalidate any instruction cacheline containing virtual address
+ * vaddr.
+ */
+static inline void invalidate_icache_line(void *vaddr)
+{
+ asm volatile("cache %0[0], %1"
+ :
+ : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
+ : "memory");
+}
+
+/*
+ * Applies the above functions on all lines that are touched by the
+ * specified virtual address range.
+ */
+void invalidate_dcache_region(void *start, size_t len);
+void clean_dcache_region(void *start, size_t len);
+void flush_dcache_region(void *start, size_t len);
+void invalidate_icache_region(void *start, size_t len);
+
+/*
+ * Make sure any pending writes are completed before continuing.
+ */
+#define flush_write_buffer() asm volatile("sync 0" : : : "memory")
+
+/*
+ * The following functions are called when a virtual mapping changes.
+ * We do not need to flush anything in this case.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+/*
+ * I think we need to implement this one to be able to reliably
+ * execute pages from RAMDISK. However, if we implement the
+ * flush_dcache_*() functions, it might not be needed anymore.
+ *
+ * #define flush_icache_page(vma, page) do { } while (0)
+ */
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+
+/*
+ * These are (I think) related to D-cache aliasing. We might need to
+ * do something here, but only for certain configurations. No such
+ * configurations exist at this time.
+ */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(page) do { } while (0)
+#define flush_dcache_mmap_unlock(page) do { } while (0)
+
+/*
+ * These are for I/D cache coherency. In this case, we do need to
+ * flush with all configurations.
+ */
+extern void flush_icache_range(unsigned long start, unsigned long end);
+
+extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len);
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr, void *dst,
+ const void *src, unsigned long len)
+{
+ memcpy(dst, src, len);
+}
+
+#endif /* __ASM_AVR32_CACHEFLUSH_H */
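A sketch of the ownership rule these helpers imply for a device-to-memory transfer (start_rx_dma() is hypothetical; buf must be cacheline aligned at both ends, per the invalidate_dcache_line() warning above):

	/* Drop any cached copy before the device writes, so that later
	 * CPU reads are served from memory rather than stale lines. */
	invalidate_dcache_region(buf, len);
	start_rx_dma(buf, len);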
diff --git a/arch/avr32/include/asm/checksum.h b/arch/avr32/include/asm/checksum.h
new file mode 100644
index 000000000..4ddbfd248
--- /dev/null
+++ b/arch/avr32/include/asm/checksum.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_CHECKSUM_H
+#define __ASM_AVR32_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+__wsum csum_partial_copy_generic(const void *src, void *dst, int len,
+ __wsum sum, int *src_err_ptr,
+ int *dst_err_ptr);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * access_ok().
+ */
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+}
+
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ return csum_partial_copy_generic((const void __force *)src, dst, len,
+ sum, err_ptr, NULL);
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum, tmp;
+
+ __asm__ __volatile__(
+ " ld.w %0, %1++\n"
+ " ld.w %3, %1++\n"
+ " sub %2, 4\n"
+ " add %0, %3\n"
+ " ld.w %3, %1++\n"
+ " adc %0, %0, %3\n"
+ " ld.w %3, %1++\n"
+ " adc %0, %0, %3\n"
+ " acr %0\n"
+ "1: ld.w %3, %1++\n"
+ " add %0, %3\n"
+ " acr %0\n"
+ " sub %2, 1\n"
+ " brne 1b\n"
+ " lsl %3, %0, 16\n"
+ " andl %0, 0\n"
+ " mov %2, 0xffff\n"
+ " add %0, %3\n"
+ " adc %0, %0, %2\n"
+ " com %0\n"
+ " lsr %0, 16\n"
+ : "=r"(sum), "=r"(iph), "=r"(ihl), "=r"(tmp)
+ : "1"(iph), "2"(ihl)
+ : "memory", "cc");
+ return (__force __sum16)sum;
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static inline __sum16 csum_fold(__wsum sum)
+{
+ unsigned int tmp;
+
+ asm(" bfextu %1, %0, 0, 16\n"
+ " lsr %0, 16\n"
+ " add %0, %1\n"
+ " bfextu %1, %0, 16, 16\n"
+ " add %0, %1"
+ : "=&r"(sum), "=&r"(tmp)
+ : "0"(sum));
+
+ return (__force __sum16)~sum;
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ asm(" add %0, %1\n"
+ " adc %0, %0, %2\n"
+ " adc %0, %0, %3\n"
+ " acr %0"
+ : "=r"(sum)
+ : "r"(daddr), "r"(saddr), "r"(len + proto),
+ "0"(sum)
+ : "cc");
+
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#endif /* __ASM_AVR32_CHECKSUM_H */
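A usage sketch (illustrative): a valid IPv4 header, checksum field included, sums to zero, so ip_fast_csum() doubles as a verifier.

#include <linux/ip.h>

static bool ip_header_ok(const struct iphdr *iph)
{
	/* iph->ihl counts 32-bit words, matching ip_fast_csum()'s contract */
	return ip_fast_csum(iph, iph->ihl) == 0;
}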
diff --git a/arch/avr32/include/asm/cmpxchg.h b/arch/avr32/include/asm/cmpxchg.h
new file mode 100644
index 000000000..962a6aeab
--- /dev/null
+++ b/arch/avr32/include/asm/cmpxchg.h
@@ -0,0 +1,117 @@
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ *
+ * But use these as seldom as possible since they are slower than
+ * regular operations.
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_CMPXCHG_H
+#define __ASM_AVR32_CMPXCHG_H
+
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long xchg_u32(u32 val, volatile u32 *m)
+{
+ u32 ret;
+
+ asm volatile("xchg %[ret], %[m], %[val]"
+ : [ret] "=&r"(ret), "=m"(*m)
+ : "m"(*m), [m] "r"(m), [val] "r"(val)
+ : "memory");
+ return ret;
+}
+
+static inline unsigned long __xchg(unsigned long x,
+ volatile void *ptr,
+ int size)
+{
+ switch(size) {
+ case 4:
+ return xchg_u32(x, ptr);
+ default:
+ __xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ unsigned long new)
+{
+ __u32 ret;
+
+ asm volatile(
+ "1: ssrf 5\n"
+ " ld.w %[ret], %[m]\n"
+ " cp.w %[ret], %[old]\n"
+ " brne 2f\n"
+ " stcond %[m], %[new]\n"
+ " brne 1b\n"
+ "2:\n"
+ : [ret] "=&r"(ret), [m] "=m"(*m)
+ : "m"(m), [old] "ir"(old), [new] "r"(new)
+ : "memory", "cc");
+ return ret;
+}
+
+extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
+ volatile int * m, unsigned long old, unsigned long new);
+#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+ }
+
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, old, new) \
+ ((typeof(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), \
+ (unsigned long)(new), \
+ sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+#define cmpxchg_local(ptr, old, new) \
+ ((typeof(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(old), \
+ (unsigned long)(new), \
+ sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* __ASM_AVR32_CMPXCHG_H */
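A sketch of the canonical retry loop built on cmpxchg() (saturating_inc() is a hypothetical helper):

static int saturating_inc(int *p, int max)
{
	int old, new;

	do {
		old = *p;
		if (old >= max)
			return old;
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);

	return new;
}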
diff --git a/arch/avr32/include/asm/current.h b/arch/avr32/include/asm/current.h
new file mode 100644
index 000000000..c7b0549ea
--- /dev/null
+++ b/arch/avr32/include/asm/current.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_AVR32_CURRENT_H
+#define __ASM_AVR32_CURRENT_H
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#endif /* __ASM_AVR32_CURRENT_H */
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
new file mode 100644
index 000000000..b3d18f9f3
--- /dev/null
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -0,0 +1,349 @@
+#ifndef __ASM_AVR32_DMA_MAPPING_H
+#define __ASM_AVR32_DMA_MAPPING_H
+
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ int direction);
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ /* Fix when needed. I really don't know of any limitations */
+ return 1;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+ return 0;
+}
+
+/*
+ * dma_map_single can't fail as it is implemented now.
+ */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
+{
+ return 0;
+}
+
+/**
+ * dma_alloc_coherent - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, unbuffered memory for a device for
+ * performing DMA. This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp);
+
+/**
+ * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_coherent().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * are illegal during and after execution of this call.
+ */
+extern void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle);
+
+/**
+ * dma_alloc_writecombine - allocate write-combining memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, buffered memory for a device for
+ * performing DMA. This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp);
+
+/**
+ * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_writecombine
+ * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
+ * @handle: device-view address returned from dma_alloc_writecombine
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_writecombine().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * are illegal during and after execution of this call.
+ */
+extern void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle);
+
+/**
+ * dma_map_single - map a single buffer for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @cpu_addr: CPU direct mapped address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_single() or dma_sync_single().
+ */
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_cache_sync(dev, cpu_addr, size, direction);
+ return virt_to_bus(cpu_addr);
+}
+
+/**
+ * dma_unmap_single - unmap a single buffer previously mapped
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+
+}
+
+/**
+ * dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page() or dma_sync_single().
+ */
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_map_single(dev, page_address(page) + offset,
+ size, direction);
+}
+
+/**
+ * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_unmap_single(dev, dma_address, size, direction);
+}
+
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++) {
+ char *virt;
+
+ sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
+ virt = sg_virt(&sg[i]);
+ dma_cache_sync(dev, virt, sg[i].length, direction);
+ }
+
+ return nents;
+}
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a set of streaming mode DMA translations.
+ * Again, CPU read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+
+}
+
+/**
+ * dma_sync_single_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a single streaming mode DMA
+ * translation after a transfer.
+ *
+ * If you perform a dma_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the DMA mapping,
+ * you must call this function before doing so. At the next point you
+ * give the DMA address back to the card, you must first perform a
+ * dma_sync_single_for_device, and then the device again owns the
+ * buffer.
+ */
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ /*
+ * No need to do anything since the CPU isn't supposed to
+ * touch this memory after we flushed it at mapping- or
+ * sync-for-device time.
+ */
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as dma_sync_single_for_* but for a scatter-gather list,
+ * same rules and usage.
+ */
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ /*
+ * No need to do anything since the CPU isn't supposed to
+ * touch this memory after we flushed it at mapping- or
+ * sync-for-device time.
+ */
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++) {
+ dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
+ }
+}
+
+/* Now for the API extensions over the pci_ one */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+#endif /* __ASM_AVR32_DMA_MAPPING_H */
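A sketch of the streaming usage these helpers imply for a transmit path (hw_start_tx() and the completion step are hypothetical):

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* cannot fail here, but keep the idiom */

	hw_start_tx(dev, handle, len);
	/* ...once the device signals completion... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);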
diff --git a/arch/avr32/include/asm/dma.h b/arch/avr32/include/asm/dma.h
new file mode 100644
index 000000000..9e9120559
--- /dev/null
+++ b/arch/avr32/include/asm/dma.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_AVR32_DMA_H
+#define __ASM_AVR32_DMA_H
+
+/* The maximum address that we can perform a DMA transfer to on this platform.
+ * Not really applicable to AVR32, but some functions need it. */
+#define MAX_DMA_ADDRESS 0xffffffff
+
+#endif /* __ASM_AVR32_DMA_H */
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
new file mode 100644
index 000000000..0388ece75
--- /dev/null
+++ b/arch/avr32/include/asm/elf.h
@@ -0,0 +1,105 @@
+#ifndef __ASM_AVR32_ELF_H
+#define __ASM_AVR32_ELF_H
+
+/* AVR32 relocation numbers */
+#define R_AVR32_NONE 0
+#define R_AVR32_32 1
+#define R_AVR32_16 2
+#define R_AVR32_8 3
+#define R_AVR32_32_PCREL 4
+#define R_AVR32_16_PCREL 5
+#define R_AVR32_8_PCREL 6
+#define R_AVR32_DIFF32 7
+#define R_AVR32_DIFF16 8
+#define R_AVR32_DIFF8 9
+#define R_AVR32_GOT32 10
+#define R_AVR32_GOT16 11
+#define R_AVR32_GOT8 12
+#define R_AVR32_21S 13
+#define R_AVR32_16U 14
+#define R_AVR32_16S 15
+#define R_AVR32_8S 16
+#define R_AVR32_8S_EXT 17
+#define R_AVR32_22H_PCREL 18
+#define R_AVR32_18W_PCREL 19
+#define R_AVR32_16B_PCREL 20
+#define R_AVR32_16N_PCREL 21
+#define R_AVR32_14UW_PCREL 22
+#define R_AVR32_11H_PCREL 23
+#define R_AVR32_10UW_PCREL 24
+#define R_AVR32_9H_PCREL 25
+#define R_AVR32_9UW_PCREL 26
+#define R_AVR32_HI16 27
+#define R_AVR32_LO16 28
+#define R_AVR32_GOTPC 29
+#define R_AVR32_GOTCALL 30
+#define R_AVR32_LDA_GOT 31
+#define R_AVR32_GOT21S 32
+#define R_AVR32_GOT18SW 33
+#define R_AVR32_GOT16S 34
+#define R_AVR32_GOT7UW 35
+#define R_AVR32_32_CPENT 36
+#define R_AVR32_CPCALL 37
+#define R_AVR32_16_CP 38
+#define R_AVR32_9W_CP 39
+#define R_AVR32_RELATIVE 40
+#define R_AVR32_GLOB_DAT 41
+#define R_AVR32_JMP_SLOT 42
+#define R_AVR32_ALIGN 43
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct pt_regs) / sizeof (elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_fpu_struct elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ( (x)->e_machine == EM_AVR32 )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#ifdef __LITTLE_ENDIAN__
+#define ELF_DATA ELFDATA2LSB
+#else
+#define ELF_DATA ELFDATA2MSB
+#endif
+#define ELF_ARCH EM_AVR32
+
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment, AVR32 defines no implementation-specific variants,
+ but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#endif /* __ASM_AVR32_ELF_H */
diff --git a/arch/avr32/include/asm/fb.h b/arch/avr32/include/asm/fb.h
new file mode 100644
index 000000000..41baf84ad
--- /dev/null
+++ b/arch/avr32/include/asm/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
+ & ~_PAGE_CACHABLE)
+ | (_PAGE_BUFFER | _PAGE_DIRTY));
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/avr32/include/asm/ftrace.h b/arch/avr32/include/asm/ftrace.h
new file mode 100644
index 000000000..40a8c178f
--- /dev/null
+++ b/arch/avr32/include/asm/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/avr32/include/asm/gpio.h b/arch/avr32/include/asm/gpio.h
new file mode 100644
index 000000000..b771f7105
--- /dev/null
+++ b/arch/avr32/include/asm/gpio.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_AVR32_GPIO_H
+#define __ASM_AVR32_GPIO_H
+
+#include <mach/gpio.h>
+
+#endif /* __ASM_AVR32_GPIO_H */
diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h
new file mode 100644
index 000000000..9e36e3ff7
--- /dev/null
+++ b/arch/avr32/include/asm/hardirq.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_AVR32_HARDIRQ_H
+#define __ASM_AVR32_HARDIRQ_H
+#ifndef __ASSEMBLY__
+#include <asm-generic/hardirq.h>
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_AVR32_HARDIRQ_H */
diff --git a/arch/avr32/include/asm/hw_irq.h b/arch/avr32/include/asm/hw_irq.h
new file mode 100644
index 000000000..a36f9fcb8
--- /dev/null
+++ b/arch/avr32/include/asm/hw_irq.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_AVR32_HW_IRQ_H
+#define __ASM_AVR32_HW_IRQ_H
+
+static inline void hw_resend_irq(struct irq_chip *h, unsigned int i)
+{
+ /* Nothing to do */
+}
+
+#endif /* __ASM_AVR32_HW_IRQ_H */
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
new file mode 100644
index 000000000..4f5ec2bb7
--- /dev/null
+++ b/arch/avr32/include/asm/io.h
@@ -0,0 +1,327 @@
+#ifndef __ASM_AVR32_IO_H
+#define __ASM_AVR32_IO_H
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/byteorder.h>
+
+#include <mach/io.h>
+
+/* virt_to_phys will only work when address is in P1 or P2 */
+static __inline__ unsigned long virt_to_phys(volatile void *address)
+{
+ return PHYSADDR(address);
+}
+
+static __inline__ void * phys_to_virt(unsigned long address)
+{
+ return (void *)P1SEGADDR(address);
+}
+
+#define cached_to_phys(addr) ((unsigned long)PHYSADDR(addr))
+#define uncached_to_phys(addr) ((unsigned long)PHYSADDR(addr))
+#define phys_to_cached(addr) ((void *)P1SEGADDR(addr))
+#define phys_to_uncached(addr) ((void *)P2SEGADDR(addr))
+
+/*
+ * Generic IO read/write. These perform native-endian accesses. Note
+ * that some architectures will want to re-define __raw_{read,write}w.
+ */
+extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
+extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
+extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
+
+extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
+extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
+extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
+
+static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = v;
+}
+static inline void __raw_writew(u16 v, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *)addr = v;
+}
+static inline void __raw_writel(u32 v, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *)addr = v;
+}
+
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *)addr;
+}
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *)addr;
+}
+
+/* Convert I/O port address to virtual address */
+#ifndef __io
+# define __io(p) ((void *)phys_to_uncached(p))
+#endif
+
+/*
+ * Not really sure about the best way to slow down I/O on
+ * AVR32. Defining it as a no-op until we have an actual test case.
+ */
+#define SLOW_DOWN_IO do { } while (0)
+
+#define __BUILD_MEMORY_SINGLE(pfx, bwl, type) \
+static inline void \
+pfx##write##bwl(type val, volatile void __iomem *addr) \
+{ \
+ volatile type *__addr; \
+ type __val; \
+ \
+ __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \
+ __val = pfx##ioswab##bwl(__addr, val); \
+ \
+ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+ *__addr = __val; \
+} \
+ \
+static inline type pfx##read##bwl(const volatile void __iomem *addr) \
+{ \
+ volatile type *__addr; \
+ type __val; \
+ \
+ __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \
+ \
+ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+ __val = *__addr; \
+ return pfx##ioswab##bwl(__addr, __val); \
+}
+
+#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow) \
+static inline void pfx##out##bwl##p(type val, unsigned long port) \
+{ \
+ volatile type *__addr; \
+ type __val; \
+ \
+ __addr = __io(__swizzle_addr_##bwl(port)); \
+ __val = pfx##ioswab##bwl(__addr, val); \
+ \
+ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+ *__addr = __val; \
+ slow; \
+} \
+ \
+static inline type pfx##in##bwl##p(unsigned long port) \
+{ \
+ volatile type *__addr; \
+ type __val; \
+ \
+ __addr = __io(__swizzle_addr_##bwl(port)); \
+ \
+ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+ __val = *__addr; \
+ slow; \
+ \
+ return pfx##ioswab##bwl(__addr, __val); \
+}
+
+#define __BUILD_MEMORY_PFX(bus, bwl, type) \
+ __BUILD_MEMORY_SINGLE(bus, bwl, type)
+
+#define BUILDIO_MEM(bwl, type) \
+ __BUILD_MEMORY_PFX(, bwl, type) \
+ __BUILD_MEMORY_PFX(__mem_, bwl, type)
+
+#define __BUILD_IOPORT_PFX(bus, bwl, type) \
+ __BUILD_IOPORT_SINGLE(bus, bwl, type, ,) \
+ __BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)
+
+#define BUILDIO_IOPORT(bwl, type) \
+ __BUILD_IOPORT_PFX(, bwl, type) \
+ __BUILD_IOPORT_PFX(__mem_, bwl, type)
+
+BUILDIO_MEM(b, u8)
+BUILDIO_MEM(w, u16)
+BUILDIO_MEM(l, u32)
+
+BUILDIO_IOPORT(b, u8)
+BUILDIO_IOPORT(w, u16)
+BUILDIO_IOPORT(l, u32)
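+
+/*
+ * The invocations above generate read{b,w,l}()/write{b,w,l}() for
+ * MMIO, in{b,w,l}()/out{b,w,l}() and their _p "pause" variants for
+ * port I/O, plus __mem_-prefixed versions of each that the string
+ * accessors below are built on.
+ */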
+
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
+#define readb_be __raw_readb
+#define readw_be __raw_readw
+#define readl_be __raw_readl
+
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+
+#define writeb_be __raw_writeb
+#define writew_be __raw_writew
+#define writel_be __raw_writel
+
+#define __BUILD_MEMORY_STRING(bwl, type) \
+static inline void writes##bwl(volatile void __iomem *addr, \
+ const void *data, unsigned int count) \
+{ \
+ const type *__data = data; \
+ \
+ while (count--) \
+ __mem_write##bwl(*__data++, addr); \
+} \
+ \
+static inline void reads##bwl(const volatile void __iomem *addr, \
+ void *data, unsigned int count) \
+{ \
+ type *__data = data; \
+ \
+ while (count--) \
+ *__data++ = __mem_read##bwl(addr); \
+}
+
+#define __BUILD_IOPORT_STRING(bwl, type) \
+static inline void outs##bwl(unsigned long port, const void *data, \
+ unsigned int count) \
+{ \
+ const type *__data = data; \
+ \
+ while (count--) \
+ __mem_out##bwl(*__data++, port); \
+} \
+ \
+static inline void ins##bwl(unsigned long port, void *data, \
+ unsigned int count) \
+{ \
+ type *__data = data; \
+ \
+ while (count--) \
+ *__data++ = __mem_in##bwl(port); \
+}
+
+#define BUILDSTRING(bwl, type) \
+ __BUILD_MEMORY_STRING(bwl, type) \
+ __BUILD_IOPORT_STRING(bwl, type)
+
+BUILDSTRING(b, u8)
+BUILDSTRING(w, u16)
+BUILDSTRING(l, u32)
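+
+/*
+ * The above provides reads{b,w,l}()/writes{b,w,l}() for MMIO and
+ * ins{b,w,l}()/outs{b,w,l}() for port I/O. For example, a driver can
+ * drain a 16-entry FIFO of 16-bit values with
+ *
+ *	u16 buf[16];
+ *	readsw(fifo_base, buf, 16);
+ *
+ * where fifo_base stands in for the driver's __iomem cookie.
+ */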
+
+/*
+ * io{read,write}{8,16,32} macros, in both little-endian (for PCI
+ * style consumers) and native big-endian variants
+ */
+#ifndef ioread8
+
+#define ioread8(p) ((unsigned int)readb(p))
+
+#define ioread16(p) ((unsigned int)readw(p))
+#define ioread16be(p) ((unsigned int)__raw_readw(p))
+
+#define ioread32(p) ((unsigned int)readl(p))
+#define ioread32be(p) ((unsigned int)__raw_readl(p))
+
+#define iowrite8(v,p) writeb(v, p)
+
+#define iowrite16(v,p) writew(v, p)
+#define iowrite16be(v,p) __raw_writew(v, p)
+
+#define iowrite32(v,p) writel(v, p)
+#define iowrite32be(v,p) __raw_writel(v, p)
+
+#define ioread8_rep(p,d,c) readsb(p,d,c)
+#define ioread16_rep(p,d,c) readsw(p,d,c)
+#define ioread32_rep(p,d,c) readsl(p,d,c)
+
+#define iowrite8_rep(p,s,c) writesb(p,s,c)
+#define iowrite16_rep(p,s,c) writesw(p,s,c)
+#define iowrite32_rep(p,s,c) writesl(p,s,c)
+
+#endif
+
+static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
+ unsigned long count)
+{
+ memcpy(to, (const void __force *)from, count);
+}
+
+static inline void memcpy_toio(volatile void __iomem *to, const void * from,
+ unsigned long count)
+{
+ memcpy((void __force *)to, from, count);
+}
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val,
+ unsigned long count)
+{
+ memset((void __force *)addr, val, count);
+}
+
+#define mmiowb()
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+extern void __iomem *__ioremap(unsigned long offset, size_t size,
+ unsigned long flags);
+extern void __iounmap(void __iomem *addr);
+
+/*
+ * ioremap - map bus memory into CPU space
+ * @offset bus address of the memory
+ * @size size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to make
+ * bus memory CPU accessible via the readb/.../writel functions and
+ * the other mmio helpers. The returned address is not guaranteed to
+ * be usable directly as a virtual address.
+ */
+#define ioremap(offset, size) \
+ __ioremap((offset), (size), 0)
+
+#define ioremap_nocache(offset, size) \
+ __ioremap((offset), (size), 0)
+
+#define iounmap(addr) \
+ __iounmap(addr)
+
+#define ioremap_wc ioremap_nocache
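+
+/*
+ * A typical use looks like this (sketch; the bus address and mapping
+ * size are made up for illustration):
+ *
+ *	void __iomem *base = ioremap(0xfff03800, 0x400);
+ *	if (base) {
+ *		u32 id = readl(base);
+ *		...
+ *		iounmap(base);
+ *	}
+ */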
+
+#define cached(addr) P1SEGADDR(addr)
+#define uncached(addr) P2SEGADDR(addr)
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define page_to_bus page_to_phys
+#define bus_to_page phys_to_page
+
+/*
+ * Create a virtual mapping cookie for an IO port range. There exists
+ * no such thing as port-based I/O on AVR32, so a regular ioremap()
+ * should do what we need.
+ */
+#define ioport_map(port, nr) ioremap(port, nr)
+#define ioport_unmap(port) iounmap(port)
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#endif /* __ASM_AVR32_IO_H */
diff --git a/arch/avr32/include/asm/irq.h b/arch/avr32/include/asm/irq.h
new file mode 100644
index 000000000..6fa8913f8
--- /dev/null
+++ b/arch/avr32/include/asm/irq.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_AVR32_IRQ_H
+#define __ASM_AVR32_IRQ_H
+
+#define NR_INTERNAL_IRQS 64
+
+#include <mach/irq.h>
+
+#ifndef NR_IRQS
+#define NR_IRQS (NR_INTERNAL_IRQS)
+#endif
+
+#define irq_canonicalize(i) (i)
+
+#ifndef __ASSEMBLER__
+int nmi_enable(void);
+void nmi_disable(void);
+
+/*
+ * Returns a bitmask of pending interrupts in a group.
+ */
+extern unsigned long intc_get_pending(unsigned int group);
+#endif
+
+#endif /* __ASM_AVR32_IRQ_H */
diff --git a/arch/avr32/include/asm/irqflags.h b/arch/avr32/include/asm/irqflags.h
new file mode 100644
index 000000000..006e94873
--- /dev/null
+++ b/arch/avr32/include/asm/irqflags.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_IRQFLAGS_H
+#define __ASM_AVR32_IRQFLAGS_H
+
+#include <linux/types.h>
+#include <asm/sysreg.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return sysreg_read(SR);
+}
+
+/*
+ * This will restore ALL status register flags, not only the interrupt
+ * mask flag.
+ *
+ * The empty asm statement informs the compiler of this fact while
+ * also serving as a barrier.
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ sysreg_write(SR, flags);
+ asm volatile("" : : : "memory", "cc");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory");
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & SYSREG_BIT(GM)) != 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+
+ arch_local_irq_disable();
+
+ return flags;
+}
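+
+/*
+ * The usual pairing applies:
+ *
+ *	unsigned long flags;
+ *
+ *	flags = arch_local_irq_save();
+ *	... code that must not be interrupted ...
+ *	arch_local_irq_restore(flags);
+ */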
+
+#endif /* __ASM_AVR32_IRQFLAGS_H */
diff --git a/arch/avr32/include/asm/kdebug.h b/arch/avr32/include/asm/kdebug.h
new file mode 100644
index 000000000..f930ce286
--- /dev/null
+++ b/arch/avr32/include/asm/kdebug.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_AVR32_KDEBUG_H
+#define __ASM_AVR32_KDEBUG_H
+
+/* Grossly misnamed. */
+enum die_val {
+ DIE_BREAKPOINT,
+ DIE_SSTEP,
+ DIE_NMI,
+ DIE_OOPS,
+};
+
+#endif /* __ASM_AVR32_KDEBUG_H */
diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
new file mode 100644
index 000000000..479330b89
--- /dev/null
+++ b/arch/avr32/include/asm/kmap_types.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_AVR32_KMAP_TYPES_H
+#define __ASM_AVR32_KMAP_TYPES_H
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+# define KM_TYPE_NR 29
+#else
+# define KM_TYPE_NR 14
+#endif
+
+#endif /* __ASM_AVR32_KMAP_TYPES_H */
diff --git a/arch/avr32/include/asm/kprobes.h b/arch/avr32/include/asm/kprobes.h
new file mode 100644
index 000000000..45f563ed7
--- /dev/null
+++ b/arch/avr32/include/asm/kprobes.h
@@ -0,0 +1,49 @@
+/*
+ * Kernel Probes (KProbes)
+ *
+ * Copyright (C) 2005-2006 Atmel Corporation
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_KPROBES_H
+#define __ASM_AVR32_KPROBES_H
+
+#include <linux/types.h>
+
+typedef u16 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */
+#define MAX_INSN_SIZE 2
+#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
+
+#define kretprobe_blacklist_size 0
+
+#define arch_remove_kprobe(p) do { } while (0)
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+ kprobe_opcode_t insn[MAX_INSN_SIZE];
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct prev_kprobe prev_kprobe;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+};
+
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+#define flush_insn_slot(p) do { } while (0)
+
+#endif /* __ASM_AVR32_KPROBES_H */
diff --git a/arch/avr32/include/asm/linkage.h b/arch/avr32/include/asm/linkage.h
new file mode 100644
index 000000000..f7b285e91
--- /dev/null
+++ b/arch/avr32/include/asm/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .balign 2
+#define __ALIGN_STR ".balign 2"
+
+#endif /* __ASM_LINKAGE_H */
diff --git a/arch/avr32/include/asm/mmu.h b/arch/avr32/include/asm/mmu.h
new file mode 100644
index 000000000..60c2d2650
--- /dev/null
+++ b/arch/avr32/include/asm/mmu.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_AVR32_MMU_H
+#define __ASM_AVR32_MMU_H
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+#define MMU_ITLB_ENTRIES 64
+#define MMU_DTLB_ENTRIES 64
+
+#endif /* __ASM_AVR32_MMU_H */
diff --git a/arch/avr32/include/asm/mmu_context.h b/arch/avr32/include/asm/mmu_context.h
new file mode 100644
index 000000000..27ff23407
--- /dev/null
+++ b/arch/avr32/include/asm/mmu_context.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * ASID handling taken from SH implementation.
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_MMU_CONTEXT_H
+#define __ASM_AVR32_MMU_CONTEXT_H
+
+#include <asm/tlbflush.h>
+#include <asm/sysreg.h>
+#include <asm-generic/mm_hooks.h>
+
+/*
+ * The MMU "context" consists of two things:
+ * (a) TLB cache version
+ * (b) ASID (Address Space IDentifier)
+ */
+#define MMU_CONTEXT_ASID_MASK 0x000000ff
+#define MMU_CONTEXT_VERSION_MASK 0xffffff00
+#define MMU_CONTEXT_FIRST_VERSION 0x00000100
+#define NO_CONTEXT 0
+
+#define MMU_NO_ASID 0x100
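+
+/*
+ * For example, a context value of 0x00000305 carries version
+ * 0x00000300 and ASID 0x05. When the ASID part of mmu_context_cache
+ * wraps around, get_mmu_context() below bumps the version and flushes
+ * the entire TLB so no stale translations survive into the new cycle.
+ */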
+
+/* Virtual Page Number mask */
+#define MMU_VPN_MASK 0xfffff000
+
+/* Cache of MMU context last used */
+extern unsigned long mmu_context_cache;
+
+/*
+ * Get MMU context if needed
+ */
+static inline void
+get_mmu_context(struct mm_struct *mm)
+{
+ unsigned long mc = mmu_context_cache;
+
+ if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+ /* It's up to date, do nothing */
+ return;
+
+	/* It's old; we need to get a new context with a new version */
+ mc = ++mmu_context_cache;
+ if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+ /*
+ * We have exhausted all ASIDs of this version.
+		 * Flush the TLB and start a new cycle.
+ */
+ flush_tlb_all();
+ /*
+		 * Fix the version; we avoid version #0 so that a
+		 * valid context is never confused with NO_CONTEXT.
+ */
+ if (!mc)
+ mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+ }
+ mm->context = mc;
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+ return 0;
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ /* Do nothing */
+}
+
+static inline void set_asid(unsigned long asid)
+{
+ /* XXX: We're destroying TLBEHI[8:31] */
+ sysreg_write(TLBEHI, asid & MMU_CONTEXT_ASID_MASK);
+ cpu_sync_pipeline();
+}
+
+static inline unsigned long get_asid(void)
+{
+ unsigned long asid;
+
+ asid = sysreg_read(TLBEHI);
+ return asid & MMU_CONTEXT_ASID_MASK;
+}
+
+static inline void activate_context(struct mm_struct *mm)
+{
+ get_mmu_context(mm);
+ set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (likely(prev != next)) {
+ unsigned long __pgdir = (unsigned long)next->pgd;
+
+ sysreg_write(PTBR, __pgdir);
+ activate_context(next);
+ }
+}
+
+#define deactivate_mm(tsk,mm) do { } while(0)
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+static inline void enable_mmu(void)
+{
+ sysreg_write(MMUCR, (SYSREG_BIT(MMUCR_S)
+ | SYSREG_BIT(E)
+ | SYSREG_BIT(MMUCR_I)));
+ nop(); nop(); nop(); nop(); nop(); nop(); nop(); nop();
+
+ if (mmu_context_cache == NO_CONTEXT)
+ mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
+
+ set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
+}
+
+static inline void disable_mmu(void)
+{
+ sysreg_write(MMUCR, SYSREG_BIT(MMUCR_S));
+}
+
+#endif /* __ASM_AVR32_MMU_CONTEXT_H */
diff --git a/arch/avr32/include/asm/module.h b/arch/avr32/include/asm/module.h
new file mode 100644
index 000000000..3f083d385
--- /dev/null
+++ b/arch/avr32/include/asm/module.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_AVR32_MODULE_H
+#define __ASM_AVR32_MODULE_H
+
+#include <asm-generic/module.h>
+
+struct mod_arch_syminfo {
+ unsigned long got_offset;
+ int got_initialized;
+};
+
+struct mod_arch_specific {
+ /* Starting offset of got in the module core memory. */
+ unsigned long got_offset;
+ /* Size of the got. */
+ unsigned long got_size;
+ /* Number of symbols in syminfo. */
+ int nsyms;
+ /* Additional symbol information (got offsets). */
+ struct mod_arch_syminfo *syminfo;
+};
+
+#define MODULE_PROC_FAMILY "AVR32v1"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* __ASM_AVR32_MODULE_H */
diff --git a/arch/avr32/include/asm/mutex.h b/arch/avr32/include/asm/mutex.h
new file mode 100644
index 000000000..458c1f7fb
--- /dev/null
+++ b/arch/avr32/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/avr32/include/asm/ocd.h b/arch/avr32/include/asm/ocd.h
new file mode 100644
index 000000000..6bef09490
--- /dev/null
+++ b/arch/avr32/include/asm/ocd.h
@@ -0,0 +1,543 @@
+/*
+ * AVR32 OCD Interface and register definitions
+ *
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_OCD_H
+#define __ASM_AVR32_OCD_H
+
+/* OCD Register offsets. Abbreviations used below:
+ *
+ * BP Breakpoint
+ * Comm Communication
+ * DT Data Trace
+ * PC Program Counter
+ * PID Process ID
+ * R/W Read/Write
+ * WP Watchpoint
+ */
+#define OCD_DID 0x0000 /* Device ID */
+#define OCD_DC 0x0008 /* Development Control */
+#define OCD_DS 0x0010 /* Development Status */
+#define OCD_RWCS 0x001c /* R/W Access Control */
+#define OCD_RWA 0x0024 /* R/W Access Address */
+#define OCD_RWD 0x0028 /* R/W Access Data */
+#define OCD_WT 0x002c /* Watchpoint Trigger */
+#define OCD_DTC 0x0034 /* Data Trace Control */
+#define OCD_DTSA0 0x0038 /* DT Start Addr Channel 0 */
+#define OCD_DTSA1 0x003c /* DT Start Addr Channel 1 */
+#define OCD_DTEA0 0x0048 /* DT End Addr Channel 0 */
+#define OCD_DTEA1 0x004c /* DT End Addr Channel 1 */
+#define OCD_BWC0A 0x0058 /* PC BP/WP Control 0A */
+#define OCD_BWC0B 0x005c /* PC BP/WP Control 0B */
+#define OCD_BWC1A 0x0060 /* PC BP/WP Control 1A */
+#define OCD_BWC1B 0x0064 /* PC BP/WP Control 1B */
+#define OCD_BWC2A 0x0068 /* PC BP/WP Control 2A */
+#define OCD_BWC2B 0x006c /* PC BP/WP Control 2B */
+#define OCD_BWC3A 0x0070 /* Data BP/WP Control 3A */
+#define OCD_BWC3B 0x0074 /* Data BP/WP Control 3B */
+#define OCD_BWA0A 0x0078 /* PC BP/WP Address 0A */
+#define OCD_BWA0B 0x007c /* PC BP/WP Address 0B */
+#define OCD_BWA1A 0x0080 /* PC BP/WP Address 1A */
+#define OCD_BWA1B 0x0084 /* PC BP/WP Address 1B */
+#define OCD_BWA2A 0x0088 /* PC BP/WP Address 2A */
+#define OCD_BWA2B 0x008c /* PC BP/WP Address 2B */
+#define OCD_BWA3A 0x0090 /* Data BP/WP Address 3A */
+#define OCD_BWA3B 0x0094 /* Data BP/WP Address 3B */
+#define OCD_NXCFG 0x0100 /* Nexus Configuration */
+#define OCD_DINST 0x0104 /* Debug Instruction */
+#define OCD_DPC 0x0108 /* Debug Program Counter */
+#define OCD_CPUCM 0x010c /* CPU Control Mask */
+#define OCD_DCCPU 0x0110 /* Debug Comm CPU */
+#define OCD_DCEMU 0x0114 /* Debug Comm Emulator */
+#define OCD_DCSR 0x0118 /* Debug Comm Status */
+#define OCD_PID 0x011c /* Ownership Trace PID */
+#define OCD_EPC0 0x0120 /* Event Pair Control 0 */
+#define OCD_EPC1 0x0124 /* Event Pair Control 1 */
+#define OCD_EPC2 0x0128 /* Event Pair Control 2 */
+#define OCD_EPC3 0x012c /* Event Pair Control 3 */
+#define OCD_AXC 0x0130 /* AUX port Control */
+
+/* Bits in DID */
+#define OCD_DID_MID_START 1
+#define OCD_DID_MID_SIZE 11
+#define OCD_DID_PN_START 12
+#define OCD_DID_PN_SIZE 16
+#define OCD_DID_RN_START 28
+#define OCD_DID_RN_SIZE 4
+
+/* Bits in DC */
+#define OCD_DC_TM_START 0
+#define OCD_DC_TM_SIZE 2
+#define OCD_DC_EIC_START 3
+#define OCD_DC_EIC_SIZE 2
+#define OCD_DC_OVC_START 5
+#define OCD_DC_OVC_SIZE 3
+#define OCD_DC_SS_BIT 8
+#define OCD_DC_DBR_BIT 12
+#define OCD_DC_DBE_BIT 13
+#define OCD_DC_EOS_START 20
+#define OCD_DC_EOS_SIZE 2
+#define OCD_DC_SQA_BIT 22
+#define OCD_DC_IRP_BIT 23
+#define OCD_DC_IFM_BIT 24
+#define OCD_DC_TOZ_BIT 25
+#define OCD_DC_TSR_BIT 26
+#define OCD_DC_RID_BIT 27
+#define OCD_DC_ORP_BIT 28
+#define OCD_DC_MM_BIT 29
+#define OCD_DC_RES_BIT 30
+#define OCD_DC_ABORT_BIT 31
+
+/* Bits in DS */
+#define OCD_DS_SSS_BIT 0
+#define OCD_DS_SWB_BIT 1
+#define OCD_DS_HWB_BIT 2
+#define OCD_DS_HWE_BIT 3
+#define OCD_DS_STP_BIT 4
+#define OCD_DS_DBS_BIT 5
+#define OCD_DS_BP_START 8
+#define OCD_DS_BP_SIZE 8
+#define OCD_DS_INC_BIT 24
+#define OCD_DS_BOZ_BIT 25
+#define OCD_DS_DBA_BIT 26
+#define OCD_DS_EXB_BIT 27
+#define OCD_DS_NTBF_BIT 28
+
+/* Bits in RWCS */
+#define OCD_RWCS_DV_BIT 0
+#define OCD_RWCS_ERR_BIT 1
+#define OCD_RWCS_CNT_START 2
+#define OCD_RWCS_CNT_SIZE 14
+#define OCD_RWCS_CRC_BIT 19
+#define OCD_RWCS_NTBC_START 20
+#define OCD_RWCS_NTBC_SIZE 2
+#define OCD_RWCS_NTE_BIT 22
+#define OCD_RWCS_NTAP_BIT 23
+#define OCD_RWCS_WRAPPED_BIT 24
+#define OCD_RWCS_CCTRL_START 25
+#define OCD_RWCS_CCTRL_SIZE 2
+#define OCD_RWCS_SZ_START 27
+#define OCD_RWCS_SZ_SIZE 3
+#define OCD_RWCS_RW_BIT 30
+#define OCD_RWCS_AC_BIT 31
+
+/* Bits in RWA */
+#define OCD_RWA_RWA_START 0
+#define OCD_RWA_RWA_SIZE 32
+
+/* Bits in RWD */
+#define OCD_RWD_RWD_START 0
+#define OCD_RWD_RWD_SIZE 32
+
+/* Bits in WT */
+#define OCD_WT_DTE_START 20
+#define OCD_WT_DTE_SIZE 3
+#define OCD_WT_DTS_START 23
+#define OCD_WT_DTS_SIZE 3
+#define OCD_WT_PTE_START 26
+#define OCD_WT_PTE_SIZE 3
+#define OCD_WT_PTS_START 29
+#define OCD_WT_PTS_SIZE 3
+
+/* Bits in DTC */
+#define OCD_DTC_T0WP_BIT 0
+#define OCD_DTC_T1WP_BIT 1
+#define OCD_DTC_ASID0EN_BIT 2
+#define OCD_DTC_ASID0_START 3
+#define OCD_DTC_ASID0_SIZE 8
+#define OCD_DTC_ASID1EN_BIT 11
+#define OCD_DTC_ASID1_START 12
+#define OCD_DTC_ASID1_SIZE 8
+#define OCD_DTC_RWT1_START 28
+#define OCD_DTC_RWT1_SIZE 2
+#define OCD_DTC_RWT0_START 30
+#define OCD_DTC_RWT0_SIZE 2
+
+/* Bits in DTSA0 */
+#define OCD_DTSA0_DTSA_START 0
+#define OCD_DTSA0_DTSA_SIZE 32
+
+/* Bits in DTSA1 */
+#define OCD_DTSA1_DTSA_START 0
+#define OCD_DTSA1_DTSA_SIZE 32
+
+/* Bits in DTEA0 */
+#define OCD_DTEA0_DTEA_START 0
+#define OCD_DTEA0_DTEA_SIZE 32
+
+/* Bits in DTEA1 */
+#define OCD_DTEA1_DTEA_START 0
+#define OCD_DTEA1_DTEA_SIZE 32
+
+/* Bits in BWC0A */
+#define OCD_BWC0A_ASIDEN_BIT 0
+#define OCD_BWC0A_ASID_START 1
+#define OCD_BWC0A_ASID_SIZE 8
+#define OCD_BWC0A_EOC_BIT 14
+#define OCD_BWC0A_AME_BIT 25
+#define OCD_BWC0A_BWE_START 30
+#define OCD_BWC0A_BWE_SIZE 2
+
+/* Bits in BWC0B */
+#define OCD_BWC0B_ASIDEN_BIT 0
+#define OCD_BWC0B_ASID_START 1
+#define OCD_BWC0B_ASID_SIZE 8
+#define OCD_BWC0B_EOC_BIT 14
+#define OCD_BWC0B_AME_BIT 25
+#define OCD_BWC0B_BWE_START 30
+#define OCD_BWC0B_BWE_SIZE 2
+
+/* Bits in BWC1A */
+#define OCD_BWC1A_ASIDEN_BIT 0
+#define OCD_BWC1A_ASID_START 1
+#define OCD_BWC1A_ASID_SIZE 8
+#define OCD_BWC1A_EOC_BIT 14
+#define OCD_BWC1A_AME_BIT 25
+#define OCD_BWC1A_BWE_START 30
+#define OCD_BWC1A_BWE_SIZE 2
+
+/* Bits in BWC1B */
+#define OCD_BWC1B_ASIDEN_BIT 0
+#define OCD_BWC1B_ASID_START 1
+#define OCD_BWC1B_ASID_SIZE 8
+#define OCD_BWC1B_EOC_BIT 14
+#define OCD_BWC1B_AME_BIT 25
+#define OCD_BWC1B_BWE_START 30
+#define OCD_BWC1B_BWE_SIZE 2
+
+/* Bits in BWC2A */
+#define OCD_BWC2A_ASIDEN_BIT 0
+#define OCD_BWC2A_ASID_START 1
+#define OCD_BWC2A_ASID_SIZE 8
+#define OCD_BWC2A_EOC_BIT 14
+#define OCD_BWC2A_AMB_START 20
+#define OCD_BWC2A_AMB_SIZE 5
+#define OCD_BWC2A_AME_BIT 25
+#define OCD_BWC2A_BWE_START 30
+#define OCD_BWC2A_BWE_SIZE 2
+
+/* Bits in BWC2B */
+#define OCD_BWC2B_ASIDEN_BIT 0
+#define OCD_BWC2B_ASID_START 1
+#define OCD_BWC2B_ASID_SIZE 8
+#define OCD_BWC2B_EOC_BIT 14
+#define OCD_BWC2B_AME_BIT 25
+#define OCD_BWC2B_BWE_START 30
+#define OCD_BWC2B_BWE_SIZE 2
+
+/* Bits in BWC3A */
+#define OCD_BWC3A_ASIDEN_BIT 0
+#define OCD_BWC3A_ASID_START 1
+#define OCD_BWC3A_ASID_SIZE 8
+#define OCD_BWC3A_SIZE_START 9
+#define OCD_BWC3A_SIZE_SIZE 3
+#define OCD_BWC3A_EOC_BIT 14
+#define OCD_BWC3A_BWO_START 16
+#define OCD_BWC3A_BWO_SIZE 2
+#define OCD_BWC3A_BME_START 20
+#define OCD_BWC3A_BME_SIZE 4
+#define OCD_BWC3A_BRW_START 28
+#define OCD_BWC3A_BRW_SIZE 2
+#define OCD_BWC3A_BWE_START 30
+#define OCD_BWC3A_BWE_SIZE 2
+
+/* Bits in BWC3B */
+#define OCD_BWC3B_ASIDEN_BIT 0
+#define OCD_BWC3B_ASID_START 1
+#define OCD_BWC3B_ASID_SIZE 8
+#define OCD_BWC3B_SIZE_START 9
+#define OCD_BWC3B_SIZE_SIZE 3
+#define OCD_BWC3B_EOC_BIT 14
+#define OCD_BWC3B_BWO_START 16
+#define OCD_BWC3B_BWO_SIZE 2
+#define OCD_BWC3B_BME_START 20
+#define OCD_BWC3B_BME_SIZE 4
+#define OCD_BWC3B_BRW_START 28
+#define OCD_BWC3B_BRW_SIZE 2
+#define OCD_BWC3B_BWE_START 30
+#define OCD_BWC3B_BWE_SIZE 2
+
+/* Bits in BWA0A */
+#define OCD_BWA0A_BWA_START 0
+#define OCD_BWA0A_BWA_SIZE 32
+
+/* Bits in BWA0B */
+#define OCD_BWA0B_BWA_START 0
+#define OCD_BWA0B_BWA_SIZE 32
+
+/* Bits in BWA1A */
+#define OCD_BWA1A_BWA_START 0
+#define OCD_BWA1A_BWA_SIZE 32
+
+/* Bits in BWA1B */
+#define OCD_BWA1B_BWA_START 0
+#define OCD_BWA1B_BWA_SIZE 32
+
+/* Bits in BWA2A */
+#define OCD_BWA2A_BWA_START 0
+#define OCD_BWA2A_BWA_SIZE 32
+
+/* Bits in BWA2B */
+#define OCD_BWA2B_BWA_START 0
+#define OCD_BWA2B_BWA_SIZE 32
+
+/* Bits in BWA3A */
+#define OCD_BWA3A_BWA_START 0
+#define OCD_BWA3A_BWA_SIZE 32
+
+/* Bits in BWA3B */
+#define OCD_BWA3B_BWA_START 0
+#define OCD_BWA3B_BWA_SIZE 32
+
+/* Bits in NXCFG */
+#define OCD_NXCFG_NXARCH_START 0
+#define OCD_NXCFG_NXARCH_SIZE 4
+#define OCD_NXCFG_NXOCD_START 4
+#define OCD_NXCFG_NXOCD_SIZE 4
+#define OCD_NXCFG_NXPCB_START 8
+#define OCD_NXCFG_NXPCB_SIZE 4
+#define OCD_NXCFG_NXDB_START 12
+#define OCD_NXCFG_NXDB_SIZE 4
+#define OCD_NXCFG_MXMSEO_BIT 16
+#define OCD_NXCFG_NXMDO_START 17
+#define OCD_NXCFG_NXMDO_SIZE 4
+#define OCD_NXCFG_NXPT_BIT 21
+#define OCD_NXCFG_NXOT_BIT 22
+#define OCD_NXCFG_NXDWT_BIT 23
+#define OCD_NXCFG_NXDRT_BIT 24
+#define OCD_NXCFG_NXDTC_START 25
+#define OCD_NXCFG_NXDTC_SIZE 3
+#define OCD_NXCFG_NXDMA_BIT 28
+
+/* Bits in DINST */
+#define OCD_DINST_DINST_START 0
+#define OCD_DINST_DINST_SIZE 32
+
+/* Bits in CPUCM */
+#define OCD_CPUCM_BEM_BIT 1
+#define OCD_CPUCM_FEM_BIT 2
+#define OCD_CPUCM_REM_BIT 3
+#define OCD_CPUCM_IBEM_BIT 4
+#define OCD_CPUCM_IEEM_BIT 5
+
+/* Bits in DCCPU */
+#define OCD_DCCPU_DATA_START 0
+#define OCD_DCCPU_DATA_SIZE 32
+
+/* Bits in DCEMU */
+#define OCD_DCEMU_DATA_START 0
+#define OCD_DCEMU_DATA_SIZE 32
+
+/* Bits in DCSR */
+#define OCD_DCSR_CPUD_BIT 0
+#define OCD_DCSR_EMUD_BIT 1
+
+/* Bits in PID */
+#define OCD_PID_PROCESS_START 0
+#define OCD_PID_PROCESS_SIZE 32
+
+/* Bits in EPC0 */
+#define OCD_EPC0_RNG_START 0
+#define OCD_EPC0_RNG_SIZE 2
+#define OCD_EPC0_CE_BIT 4
+#define OCD_EPC0_ECNT_START 16
+#define OCD_EPC0_ECNT_SIZE 16
+
+/* Bits in EPC1 */
+#define OCD_EPC1_RNG_START 0
+#define OCD_EPC1_RNG_SIZE 2
+#define OCD_EPC1_ATB_BIT 5
+#define OCD_EPC1_AM_BIT 6
+
+/* Bits in EPC2 */
+#define OCD_EPC2_RNG_START 0
+#define OCD_EPC2_RNG_SIZE 2
+#define OCD_EPC2_DB_START 2
+#define OCD_EPC2_DB_SIZE 2
+
+/* Bits in EPC3 */
+#define OCD_EPC3_RNG_START 0
+#define OCD_EPC3_RNG_SIZE 2
+#define OCD_EPC3_DWE_BIT 2
+
+/* Bits in AXC */
+#define OCD_AXC_DIV_START 0
+#define OCD_AXC_DIV_SIZE 4
+#define OCD_AXC_AXE_BIT 8
+#define OCD_AXC_AXS_BIT 9
+#define OCD_AXC_DDR_BIT 10
+#define OCD_AXC_LS_BIT 11
+#define OCD_AXC_REX_BIT 12
+#define OCD_AXC_REXTEN_BIT 13
+
+/* Constants for DC:EIC */
+#define OCD_EIC_PROGRAM_AND_DATA_TRACE 0
+#define OCD_EIC_BREAKPOINT 1
+#define OCD_EIC_NOP 2
+
+/* Constants for DC:OVC */
+#define OCD_OVC_OVERRUN 0
+#define OCD_OVC_DELAY_CPU_BTM 1
+#define OCD_OVC_DELAY_CPU_DTM 2
+#define OCD_OVC_DELAY_CPU_BTM_DTM 3
+
+/* Constants for DC:EOS */
+#define OCD_EOS_NOP 0
+#define OCD_EOS_DEBUG_MODE 1
+#define OCD_EOS_BREAKPOINT_WATCHPOINT 2
+#define OCD_EOS_THQ 3
+
+/* Constants for RWCS:NTBC */
+#define OCD_NTBC_OVERWRITE 0
+#define OCD_NTBC_DISABLE 1
+#define OCD_NTBC_BREAKPOINT 2
+
+/* Constants for RWCS:CCTRL */
+#define OCD_CCTRL_AUTO 0
+#define OCD_CCTRL_CACHED 1
+#define OCD_CCTRL_UNCACHED 2
+
+/* Constants for RWCS:SZ */
+#define OCD_SZ_BYTE 0
+#define OCD_SZ_HALFWORD 1
+#define OCD_SZ_WORD 2
+
+/* Constants for WT:PTS */
+#define OCD_PTS_DISABLED 0
+#define OCD_PTS_PROGRAM_0B 1
+#define OCD_PTS_PROGRAM_1A 2
+#define OCD_PTS_PROGRAM_1B 3
+#define OCD_PTS_PROGRAM_2A 4
+#define OCD_PTS_PROGRAM_2B 5
+#define OCD_PTS_DATA_3A 6
+#define OCD_PTS_DATA_3B 7
+
+/* Constants for DTC:RWT1 */
+#define OCD_RWT1_NO_TRACE 0
+#define OCD_RWT1_DATA_READ 1
+#define OCD_RWT1_DATA_WRITE 2
+#define OCD_RWT1_DATA_READ_WRITE 3
+
+/* Constants for DTC:RWT0 */
+#define OCD_RWT0_NO_TRACE 0
+#define OCD_RWT0_DATA_READ 1
+#define OCD_RWT0_DATA_WRITE 2
+#define OCD_RWT0_DATA_READ_WRITE 3
+
+/* Constants for BWC0A:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC0B:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC1A:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC1B:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC2A:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC2B:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC3A:SIZE */
+#define OCD_SIZE_BYTE_ACCESS 4
+#define OCD_SIZE_HALFWORD_ACCESS 5
+#define OCD_SIZE_WORD_ACCESS 6
+#define OCD_SIZE_DOUBLE_WORD_ACCESS 7
+
+/* Constants for BWC3A:BRW */
+#define OCD_BRW_READ_BREAK 0
+#define OCD_BRW_WRITE_BREAK 1
+#define OCD_BRW_ANY_ACCESS_BREAK	2
+
+/* Constants for BWC3A:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for BWC3B:SIZE */
+#define OCD_SIZE_BYTE_ACCESS 4
+#define OCD_SIZE_HALFWORD_ACCESS 5
+#define OCD_SIZE_WORD_ACCESS 6
+#define OCD_SIZE_DOUBLE_WORD_ACCESS 7
+
+/* Constants for BWC3B:BRW */
+#define OCD_BRW_READ_BREAK 0
+#define OCD_BRW_WRITE_BREAK 1
+#define OCD_BRW_ANY_ACCESS_BREAK	2
+
+/* Constants for BWC3B:BWE */
+#define OCD_BWE_DISABLED 0
+#define OCD_BWE_BREAKPOINT_ENABLED 1
+#define OCD_BWE_WATCHPOINT_ENABLED 3
+
+/* Constants for EPC0:RNG */
+#define OCD_RNG_DISABLED 0
+#define OCD_RNG_EXCLUSIVE 1
+#define OCD_RNG_INCLUSIVE 2
+
+/* Constants for EPC1:RNG */
+#define OCD_RNG_DISABLED 0
+#define OCD_RNG_EXCLUSIVE 1
+#define OCD_RNG_INCLUSIVE 2
+
+/* Constants for EPC2:RNG */
+#define OCD_RNG_DISABLED 0
+#define OCD_RNG_EXCLUSIVE 1
+#define OCD_RNG_INCLUSIVE 2
+
+/* Constants for EPC2:DB */
+#define OCD_DB_DISABLED 0
+#define OCD_DB_CHAINED_B 1
+#define OCD_DB_CHAINED_A 2
+#define OCD_DB_CHAINED_A_AND_B		3
+
+/* Constants for EPC3:RNG */
+#define OCD_RNG_DISABLED 0
+#define OCD_RNG_EXCLUSIVE 1
+#define OCD_RNG_INCLUSIVE 2
+
+#ifndef __ASSEMBLER__
+
+/* Register access macros */
+static inline unsigned long __ocd_read(unsigned int reg)
+{
+ return __builtin_mfdr(reg);
+}
+
+static inline void __ocd_write(unsigned int reg, unsigned long value)
+{
+ __builtin_mtdr(reg, value);
+}
+
+#define ocd_read(reg) __ocd_read(OCD_##reg)
+#define ocd_write(reg, value) __ocd_write(OCD_##reg, value)
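+
+/*
+ * For example, ocd_read(DS) expands to __ocd_read(OCD_DS) and returns
+ * the Development Status register, while ocd_write(DC, val) stores
+ * val into the Development Control register.
+ */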
+
+struct task_struct;
+
+void ocd_enable(struct task_struct *child);
+void ocd_disable(struct task_struct *child);
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* __ASM_AVR32_OCD_H */
diff --git a/arch/avr32/include/asm/page.h b/arch/avr32/include/asm/page.h
new file mode 100644
index 000000000..f805d1cb1
--- /dev/null
+++ b/arch/avr32/include/asm/page.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_PAGE_H
+#define __ASM_AVR32_PAGE_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PTE_MASK PAGE_MASK
+
+#ifndef __ASSEMBLY__
+
+#include <asm/addrspace.h>
+
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+/* FIXME: These should be removed soon */
+extern unsigned long memory_start, memory_end;
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+ unsigned lz;
+
+ size = (size - 1) >> PAGE_SHIFT;
+ asm("clz %0, %1" : "=r"(lz) : "r"(size));
+ return 32 - lz;
+}
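+
+/*
+ * Since clz returns 32 for a zero operand, get_order(1) through
+ * get_order(PAGE_SIZE) all yield 0, get_order(PAGE_SIZE + 1) yields
+ * 1, and so on.
+ */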
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The hardware maps the virtual addresses 0x80000000 -> 0x9fffffff
+ * permanently to the physical addresses 0x00000000 -> 0x1fffffff when
+ * segmentation is enabled. We want to make use of this in order to
+ * minimize TLB pressure.
+ */
+#define PAGE_OFFSET (0x80000000UL)
+
+/*
+ * ALSA uses virt_to_page() on DMA pages, which I'm not entirely sure
+ * is a good idea. Anyway, we can't simply subtract PAGE_OFFSET here
+ * in that case, so we'll have to mask out the three most significant
+ * bits of the address instead...
+ *
+ * What's the difference between __pa() and virt_to_phys() anyway?
+ */
+#define __pa(x) PHYSADDR(x)
+#define __va(x) ((void *)(P1SEGADDR(x)))
+
+#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+#define phys_to_page(phys)	(pfn_to_page((phys) >> PAGE_SHIFT))
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+
+#define PHYS_PFN_OFFSET (CONFIG_PHYS_OFFSET >> PAGE_SHIFT)
+
+#define pfn_to_page(pfn) (mem_map + ((pfn) - PHYS_PFN_OFFSET))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PHYS_PFN_OFFSET)
+#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#define HIGHMEM_START 0x20000000UL
+
+#endif /* __ASM_AVR32_PAGE_H */
diff --git a/arch/avr32/include/asm/pci.h b/arch/avr32/include/asm/pci.h
new file mode 100644
index 000000000..a32a02372
--- /dev/null
+++ b/arch/avr32/include/asm/pci.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_AVR32_PCI_H__
+#define __ASM_AVR32_PCI_H__
+
+/* We don't support PCI yet, but some drivers require this file anyway */
+
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+#include <asm-generic/pci-dma-compat.h>
+
+#endif /* __ASM_AVR32_PCI_H__ */
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
new file mode 100644
index 000000000..1aba19d68
--- /dev/null
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_PGALLOC_H
+#define __ASM_AVR32_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/quicklist.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#define QUICK_PGD 0 /* Preserve kernel mappings over free */
+#define QUICK_PT 1 /* Zero on free */
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void pgd_ctor(void *x)
+{
+ pgd_t *pgd = x;
+
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
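+
+/*
+ * pgd_ctor() keeps the kernel portion of every address space in sync:
+ * each newly constructed pgd inherits the kernel entries (index
+ * USER_PTRS_PER_PGD and up) from swapper_pg_dir.
+ */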
+
+/*
+ * Allocate and free page tables
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ quicklist_free(QUICK_PGD, NULL, pgd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ if (!pg)
+ return NULL;
+
+ page = virt_to_page(pg);
+ if (!pgtable_page_ctor(page)) {
+ quicklist_free(QUICK_PT, NULL, pg);
+ return NULL;
+ }
+
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ quicklist_free(QUICK_PT, NULL, pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ quicklist_free_page(QUICK_PT, NULL, pte);
+}
+
+#define __pte_free_tlb(tlb,pte,addr) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
+
+static inline void check_pgt_cache(void)
+{
+ quicklist_trim(QUICK_PGD, NULL, 25, 16);
+ quicklist_trim(QUICK_PT, NULL, 25, 16);
+}
+
+#endif /* __ASM_AVR32_PGALLOC_H */
diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h
new file mode 100644
index 000000000..425dd567b
--- /dev/null
+++ b/arch/avr32/include/asm/pgtable-2level.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
+#define __ASM_AVR32_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * Traditional 2-level paging structure
+ */
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+#define PTRS_PER_PTE 1024
+
+#ifndef __ASSEMBLY__
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep, pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_AVR32_PGTABLE_2LEVEL_H */
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
new file mode 100644
index 000000000..358006640
--- /dev/null
+++ b/arch/avr32/include/asm/pgtable.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_PGTABLE_H
+#define __ASM_AVR32_PGTABLE_H
+
+#include <asm/addrspace.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Use two-level page tables just as the i386 (without PAE)
+ */
+#include <asm/pgtable-2level.h>
+
+/*
+ * The following code might need some cleanup when the values are
+ * final...
+ */
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+#ifndef __ASSEMBLY__
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used for
+ * zero-mapped memory areas etc.
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8 MiB value just means that there will be an 8 MiB "hole"
+ * after the uncached physical memory (P2 segment) until the vmalloc
+ * area starts. That means that any out-of-bounds memory accesses will
+ * hopefully be caught; we don't know whether the ends of the P1/P2
+ * segments are actually used for anything, but it is safer anyway to
+ * let the MMU catch these kinds of errors than to rely on the memory
+ * bus.
+ *
+ * A "hole" of the same size is added to the end of the P3 segment as
+ * well. It might seem wasteful to use 16 MiB of virtual address space
+ * on this, but we do have 512 MiB of it...
+ *
+ * The vmalloc() routines leave a hole of 4 KiB between each vmalloced
+ * area for the same reason.
+ */
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+#define VMALLOC_START (P3SEG + VMALLOC_OFFSET)
+#define VMALLOC_END (P4SEG - VMALLOC_OFFSET)
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Page flags. Some of these flags are not directly supported by
+ * hardware, so we have to emulate them.
+ */
+#define _TLBEHI_BIT_VALID 9
+#define _TLBEHI_VALID (1 << _TLBEHI_BIT_VALID)
+
+#define _PAGE_BIT_WT 0 /* W-bit : write-through */
+#define _PAGE_BIT_DIRTY 1 /* D-bit : page changed */
+#define _PAGE_BIT_SZ0 2 /* SZ0-bit : Size of page */
+#define _PAGE_BIT_SZ1 3 /* SZ1-bit : Size of page */
+#define _PAGE_BIT_EXECUTE 4 /* X-bit : execute access allowed */
+#define _PAGE_BIT_RW 5 /* AP0-bit : write access allowed */
+#define _PAGE_BIT_USER 6 /* AP1-bit : user space access allowed */
+#define _PAGE_BIT_BUFFER 7 /* B-bit : bufferable */
+#define _PAGE_BIT_GLOBAL 8 /* G-bit : global (ignore ASID) */
+#define _PAGE_BIT_CACHABLE 9 /* C-bit : cachable */
+
+/* If we drop support for 1K pages, we get two extra bits */
+#define _PAGE_BIT_PRESENT 10
+#define _PAGE_BIT_ACCESSED 11 /* software: page was accessed */
+
+#define _PAGE_WT (1 << _PAGE_BIT_WT)
+#define _PAGE_DIRTY (1 << _PAGE_BIT_DIRTY)
+#define _PAGE_EXECUTE (1 << _PAGE_BIT_EXECUTE)
+#define _PAGE_RW (1 << _PAGE_BIT_RW)
+#define _PAGE_USER (1 << _PAGE_BIT_USER)
+#define _PAGE_BUFFER (1 << _PAGE_BIT_BUFFER)
+#define _PAGE_GLOBAL (1 << _PAGE_BIT_GLOBAL)
+#define _PAGE_CACHABLE (1 << _PAGE_BIT_CACHABLE)
+
+/* Software flags */
+#define _PAGE_ACCESSED (1 << _PAGE_BIT_ACCESSED)
+#define _PAGE_PRESENT (1 << _PAGE_BIT_PRESENT)
+
+/*
+ * Page types, i.e. sizes. _PAGE_TYPE_NONE corresponds to what is
+ * usually called _PAGE_PROTNONE on other architectures.
+ *
+ * XXX: Find out if _PAGE_PROTNONE is equivalent to !_PAGE_USER. If
+ * so, we can encode all possible page sizes (although we can't really
+ * support 1K pages anyway due to the _PAGE_PRESENT and _PAGE_ACCESSED
+ * bits)
+ *
+ */
+#define _PAGE_TYPE_MASK ((1 << _PAGE_BIT_SZ0) | (1 << _PAGE_BIT_SZ1))
+#define _PAGE_TYPE_NONE (0 << _PAGE_BIT_SZ0)
+#define _PAGE_TYPE_SMALL (1 << _PAGE_BIT_SZ0)
+#define _PAGE_TYPE_MEDIUM (2 << _PAGE_BIT_SZ0)
+#define _PAGE_TYPE_LARGE (3 << _PAGE_BIT_SZ0)
+
+/*
+ * Mask which drops the software flags. We currently can't handle more than
+ * 512 MiB of physical memory, so we can use bits 29-31 for other
+ * stuff. With a fixed 4K page size, we can use bits 10-11 as well as
+ * bits 2-3 (SZ)
+ */
+#define _PAGE_FLAGS_HARDWARE_MASK 0xfffff3ff
+
+#define _PAGE_FLAGS_CACHE_MASK (_PAGE_CACHABLE | _PAGE_BUFFER | _PAGE_WT)
+
+/* Flags that may be modified by software */
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY \
+ | _PAGE_FLAGS_CACHE_MASK)
+
+#define _PAGE_FLAGS_READ (_PAGE_CACHABLE | _PAGE_BUFFER)
+#define _PAGE_FLAGS_WRITE (_PAGE_FLAGS_READ | _PAGE_RW | _PAGE_DIRTY)
+
+#define _PAGE_NORMAL(x) __pgprot((x) | _PAGE_PRESENT | _PAGE_TYPE_SMALL \
+ | _PAGE_ACCESSED)
+
+#define PAGE_NONE (_PAGE_ACCESSED | _PAGE_TYPE_NONE)
+#define PAGE_READ (_PAGE_FLAGS_READ | _PAGE_USER)
+#define PAGE_EXEC (_PAGE_FLAGS_READ | _PAGE_EXECUTE | _PAGE_USER)
+#define PAGE_WRITE (_PAGE_FLAGS_WRITE | _PAGE_USER)
+#define PAGE_KERNEL _PAGE_NORMAL(_PAGE_FLAGS_WRITE | _PAGE_EXECUTE | _PAGE_GLOBAL)
+#define PAGE_KERNEL_RO _PAGE_NORMAL(_PAGE_FLAGS_READ | _PAGE_EXECUTE | _PAGE_GLOBAL)
+
+#define _PAGE_P(x) _PAGE_NORMAL((x) & ~(_PAGE_RW | _PAGE_DIRTY))
+#define _PAGE_S(x) _PAGE_NORMAL(x)
+
+#define PAGE_COPY _PAGE_P(PAGE_WRITE | PAGE_READ)
+#define PAGE_SHARED _PAGE_S(PAGE_WRITE | PAGE_READ)
+
+#ifndef __ASSEMBLY__
+/*
+ * The hardware supports flags for write- and execute access. Read is
+ * always allowed if the page is loaded into the TLB, so the "-w-",
+ * "--x" and "-wx" mappings are implemented as "rw-", "r-x" and "rwx",
+ * respectively.
+ *
+ * The "---" case is handled by software; the page will simply not be
+ * loaded into the TLB if the page type is _PAGE_TYPE_NONE.
+ */
+
+#define __P000 __pgprot(PAGE_NONE)
+#define __P001 _PAGE_P(PAGE_READ)
+#define __P010 _PAGE_P(PAGE_WRITE)
+#define __P011 _PAGE_P(PAGE_WRITE | PAGE_READ)
+#define __P100 _PAGE_P(PAGE_EXEC)
+#define __P101 _PAGE_P(PAGE_EXEC | PAGE_READ)
+#define __P110 _PAGE_P(PAGE_EXEC | PAGE_WRITE)
+#define __P111 _PAGE_P(PAGE_EXEC | PAGE_WRITE | PAGE_READ)
+
+#define __S000 __pgprot(PAGE_NONE)
+#define __S001 _PAGE_S(PAGE_READ)
+#define __S010 _PAGE_S(PAGE_WRITE)
+#define __S011 _PAGE_S(PAGE_WRITE | PAGE_READ)
+#define __S100 _PAGE_S(PAGE_EXEC)
+#define __S101 _PAGE_S(PAGE_EXEC | PAGE_READ)
+#define __S110 _PAGE_S(PAGE_EXEC | PAGE_WRITE)
+#define __S111 _PAGE_S(PAGE_EXEC | PAGE_WRITE | PAGE_READ)
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+
+#define pte_clear(mm,addr,xp) \
+ do { \
+ set_pte_at(mm, addr, xp, __pte(0)); \
+ } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not.
+ */
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_RW;
+}
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
+/* Mutator functions for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW));
+ return pte;
+}
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY));
+ return pte;
+}
+static inline pte_t pte_mkold(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED));
+ return pte;
+}
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW));
+ return pte;
+}
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY));
+ return pte;
+}
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
+ return pte;
+}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x))
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+
+#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
+
+/*
+ * Permanent address of a page. We don't support highmem, so this is
+ * trivial.
+ */
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+#define pte_page(x) (pfn_to_page(pte_pfn(x)))
+
+/*
+ * Mark the prot value as uncacheable and unbufferable
+ */
+#define pgprot_noncached(prot) \
+ __pgprot(pgprot_val(prot) & ~(_PAGE_BUFFER | _PAGE_CACHABLE))
+
+/*
+ * Mark the prot value as uncacheable but bufferable
+ */
+#define pgprot_writecombine(prot) \
+ __pgprot((pgprot_val(prot) & ~_PAGE_CACHABLE) | _PAGE_BUFFER)
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK)
+ | pgprot_val(newprot)));
+ return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_page_vaddr(pmd) pmd_val(pmd)
+#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) \
+ & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) \
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
+#define pte_unmap(pte) do { } while (0)
+
+struct vm_area_struct;
+extern void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t *ptep);
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Constraints:
+ * _PAGE_TYPE_* at bits 2-3 (for emulating _PAGE_PROTNONE)
+ * _PAGE_PRESENT at bit 10
+ *
+ * We encode the type into bits 4-9 and offset into bits 11-31. This
+ * gives us a 21-bit offset, or 2**21 * 4K = 8G usable swap space per
+ * device, and 64 possible types.
+ *
+ * NOTE: The positions of the _PAGE_PRESENT and _PAGE_TYPE_* (emulated
+ * _PAGE_PROTNONE) bits must be kept zero in a swap entry.
+ */
+#define __swp_type(x) (((x).val >> 4) & 0x3f)
+#define __swp_offset(x) ((x).val >> 11)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
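+
+/*
+ * For example, __swp_entry(3, 5) yields the value 0x2830: type 3 in
+ * bits 4-9 (3 << 4 == 0x30) plus offset 5 in bits 11-31 (5 << 11 ==
+ * 0x2800), with the _PAGE_PRESENT and _PAGE_TYPE_* positions left
+ * zero.
+ */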
+
+typedef pte_t *pte_addr_t;
+
+#define kern_addr_valid(addr) (1)
+
+/* No page table caches to initialize (?) */
+#define pgtable_cache_init() do { } while(0)
+
+#include <asm-generic/pgtable.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_AVR32_PGTABLE_H */
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
new file mode 100644
index 000000000..941593c7d
--- /dev/null
+++ b/arch/avr32/include/asm/processor.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_PROCESSOR_H
+#define __ASM_AVR32_PROCESSOR_H
+
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#define TASK_SIZE 0x80000000
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void *current_text_addr(void)
+{
+ register void *pc asm("pc");
+ return pc;
+}
+
+enum arch_type {
+ ARCH_AVR32A,
+ ARCH_AVR32B,
+ ARCH_MAX
+};
+
+enum cpu_type {
+ CPU_MORGAN,
+ CPU_AT32AP,
+ CPU_MAX
+};
+
+enum tlb_config {
+ TLB_NONE,
+ TLB_SPLIT,
+ TLB_UNIFIED,
+ TLB_INVALID
+};
+
+#define AVR32_FEATURE_RMW (1 << 0)
+#define AVR32_FEATURE_DSP (1 << 1)
+#define AVR32_FEATURE_SIMD (1 << 2)
+#define AVR32_FEATURE_OCD (1 << 3)
+#define AVR32_FEATURE_PCTR (1 << 4)
+#define AVR32_FEATURE_JAVA (1 << 5)
+#define AVR32_FEATURE_FPU (1 << 6)
+
+struct avr32_cpuinfo {
+ struct clk *clk;
+ unsigned long loops_per_jiffy;
+ enum arch_type arch_type;
+ enum cpu_type cpu_type;
+ unsigned short arch_revision;
+ unsigned short cpu_revision;
+ enum tlb_config tlb_config;
+ unsigned long features;
+ u32 device_id;
+
+ struct cache_info icache;
+ struct cache_info dcache;
+};
+
+static inline unsigned int avr32_get_manufacturer_id(struct avr32_cpuinfo *cpu)
+{
+ return (cpu->device_id >> 1) & 0x7f;
+}
+static inline unsigned int avr32_get_product_number(struct avr32_cpuinfo *cpu)
+{
+ return (cpu->device_id >> 12) & 0xffff;
+}
+static inline unsigned int avr32_get_chip_revision(struct avr32_cpuinfo *cpu)
+{
+ return (cpu->device_id >> 28) & 0x0f;
+}
+
+extern struct avr32_cpuinfo boot_cpu_data;
+
+/* No SMP support so far */
+#define current_cpu_data boot_cpu_data
+
+/*
+ * This decides where the kernel will search for a free chunk of VM
+ * space during mmap().
+ */
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
+#define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
+
+struct cpu_context {
+ unsigned long sr;
+ unsigned long pc;
+ unsigned long ksp; /* Kernel stack pointer */
+ unsigned long r7;
+ unsigned long r6;
+ unsigned long r5;
+ unsigned long r4;
+ unsigned long r3;
+ unsigned long r2;
+ unsigned long r1;
+ unsigned long r0;
+};
+
+/* This struct contains the CPU context as stored by switch_to() */
+struct thread_struct {
+ struct cpu_context cpu_context;
+ unsigned long single_step_addr;
+ u16 single_step_insn;
+};
+
+#define INIT_THREAD { \
+ .cpu_context = { \
+ .ksp = sizeof(init_stack) + (long)&init_stack, \
+ }, \
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+#define start_thread(regs, new_pc, new_sp) \
+ do { \
+ memset(regs, 0, sizeof(*regs)); \
+ regs->sr = MODE_USER; \
+ regs->pc = new_pc & ~1; \
+ regs->sp = new_sp; \
+ } while(0)
+
+struct task_struct;
+
+/* Free all resources held by a thread */
+extern void release_thread(struct task_struct *);
+
+/* Return saved PC of a blocked thread */
+#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)
+
+struct pt_regs;
+extern unsigned long get_wchan(struct task_struct *p);
+extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
+extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
+ struct pt_regs *regs, const char *log_lvl);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc)
+#define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp)
+
+#define ARCH_HAS_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+ const char *c = x;
+ asm volatile("pref %0" : : "r"(c));
+}
+#define PREFETCH_STRIDE L1_CACHE_BYTES
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_AVR32_PROCESSOR_H */
diff --git a/arch/avr32/include/asm/ptrace.h b/arch/avr32/include/asm/ptrace.h
new file mode 100644
index 000000000..630e4f9bf
--- /dev/null
+++ b/arch/avr32/include/asm/ptrace.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_PTRACE_H
+#define __ASM_AVR32_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ocd.h>
+
+#define arch_has_single_step() (1)
+
+#define arch_ptrace_attach(child) ocd_enable(child)
+
+#define user_mode(regs) (((regs)->sr & MODE_MASK) == MODE_USER)
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->sp)
+
+static __inline__ int valid_user_regs(struct pt_regs *regs)
+{
+ /*
+ * Some of the Java bits might be acceptable if/when we
+ * implement some support for that stuff...
+ */
+ if ((regs->sr & 0xffff0000) == 0)
+ return 1;
+
+ /*
+ * Force status register flags to be sane and report this
+ * illegal behaviour...
+ */
+ regs->sr &= 0x0000ffff;
+ return 0;
+}
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* __ASM_AVR32_PTRACE_H */
diff --git a/arch/avr32/include/asm/serial.h b/arch/avr32/include/asm/serial.h
new file mode 100644
index 000000000..5ecaebc22
--- /dev/null
+++ b/arch/avr32/include/asm/serial.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A can handle a top speed of 1.5
+ * megabits/second, but that requires the faster clock.
+ */
+#define BASE_BAUD (1843200 / 16)
+
+#endif /* _ASM_SERIAL_H */
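
Concretely, BASE_BAUD works out to 1843200 / 16 = 115200: a 16550-style UART
divides its input clock by 16 to produce the bit clock, so a 1.8432 MHz
crystal gives the familiar 115200 baud ceiling.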
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h
new file mode 100644
index 000000000..73490ae0c
--- /dev/null
+++ b/arch/avr32/include/asm/setup.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * Based on linux/include/asm-arm/setup.h
+ * Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_SETUP_H__
+#define __ASM_AVR32_SETUP_H__
+
+#include <uapi/asm/setup.h>
+
+/* Magic number indicating that a tag table is present */
+#define ATAG_MAGIC 0xa2a25441
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Generic memory range, used by several tags.
+ *
+ * addr is always physical.
+ * size is measured in bytes.
+ * next is for use by the OS, e.g. for grouping regions into
+ * linked lists.
+ */
+struct tag_mem_range {
+ u32 addr;
+ u32 size;
+ struct tag_mem_range * next;
+};
+
+/* The list ends with an ATAG_NONE node. */
+#define ATAG_NONE 0x00000000
+
+struct tag_header {
+ u32 size;
+ u32 tag;
+};
+
+/* The list must start with an ATAG_CORE node */
+#define ATAG_CORE 0x54410001
+
+struct tag_core {
+ u32 flags;
+ u32 pagesize;
+ u32 rootdev;
+};
+
+/* it is allowed to have multiple ATAG_MEM nodes */
+#define ATAG_MEM 0x54410002
+/* ATAG_MEM uses tag_mem_range */
+
+/* command line: \0 terminated string */
+#define ATAG_CMDLINE 0x54410003
+
+struct tag_cmdline {
+ char cmdline[1]; /* this is the minimum size */
+};
+
+/* Ramdisk image (may be compressed) */
+#define ATAG_RDIMG 0x54410004
+/* ATAG_RDIMG uses tag_mem_range */
+
+/* Information about various clocks present in the system */
+#define ATAG_CLOCK 0x54410005
+
+struct tag_clock {
+ u32 clock_id; /* Which clock are we talking about? */
+ u32 clock_flags; /* Special features */
+ u64 clock_hz; /* Clock speed in Hz */
+};
+
+/* The clock types we know about */
+#define CLOCK_BOOTCPU 0
+
+/* Memory reserved for the system (e.g. the bootloader) */
+#define ATAG_RSVD_MEM 0x54410006
+/* ATAG_RSVD_MEM uses tag_mem_range */
+
+/* Ethernet information */
+
+#define ATAG_ETHERNET 0x54410007
+
+struct tag_ethernet {
+ u8 mac_index;
+ u8 mii_phy_addr;
+ u8 hw_address[6];
+};
+
+#define ETH_INVALID_PHY 0xff
+
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+ u32 board_number;
+};
+
+struct tag {
+ struct tag_header hdr;
+ union {
+ struct tag_core core;
+ struct tag_mem_range mem_range;
+ struct tag_cmdline cmdline;
+ struct tag_clock clock;
+ struct tag_ethernet ethernet;
+ struct tag_boardinfo boardinfo;
+ } u;
+};
+
+struct tagtable {
+ u32 tag;
+ int (*parse)(struct tag *);
+};
+
+#define __tag __used __attribute__((__section__(".taglist.init")))
+#define __tagtable(tag, fn) \
+ static struct tagtable __tagtable_##fn __tag = { tag, fn }
+
+#define tag_member_present(tag,member) \
+ ((unsigned long)(&((struct tag *)0L)->member + 1) \
+ <= (tag)->hdr.size * 4)
+
+#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size))
+#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
+
+#define for_each_tag(t,base) \
+ for (t = base; t->hdr.size; t = tag_next(t))
+
+extern struct tag *bootloader_tags;
+
+extern resource_size_t fbmem_start;
+extern resource_size_t fbmem_size;
+extern u32 board_number;
+
+void setup_processor(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_AVR32_SETUP_H__ */
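
A minimal sketch of how a tag gets consumed, modelled on boot code that walks
this list; the parser below is hypothetical and for illustration only:

/* Hypothetical parser for ATAG_ETHERNET tags. */
static int __init parse_tag_ethernet(struct tag *tag)
{
	struct tag_ethernet *eth = &tag->u.ethernet;

	if (eth->mii_phy_addr == ETH_INVALID_PHY)
		return -EINVAL;

	/* record eth->hw_address against eth->mac_index here */
	return 0;
}
__tagtable(ATAG_ETHERNET, parse_tag_ethernet);

The setup code can then run for_each_tag(t, bootloader_tags) and dispatch each
tag to the struct tagtable entry whose tag value matches t->hdr.tag.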
diff --git a/arch/avr32/include/asm/shmparam.h b/arch/avr32/include/asm/shmparam.h
new file mode 100644
index 000000000..3681266c7
--- /dev/null
+++ b/arch/avr32/include/asm/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_AVR32_SHMPARAM_H
+#define __ASM_AVR32_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* __ASM_AVR32_SHMPARAM_H */
diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h
new file mode 100644
index 000000000..d875eb6a3
--- /dev/null
+++ b/arch/avr32/include/asm/signal.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_SIGNAL_H
+#define __ASM_AVR32_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+/*
+ * Most things should be clean enough to redefine this at will, if care
+ * is taken to make libc match.
+ */
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+#undef __HAVE_ARCH_SIG_BITOPS
+
+#endif
diff --git a/arch/avr32/include/asm/string.h b/arch/avr32/include/asm/string.h
new file mode 100644
index 000000000..c91a623cd
--- /dev/null
+++ b/arch/avr32/include/asm/string.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_STRING_H
+#define __ASM_AVR32_STRING_H
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *b, int c, size_t len);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *to, const void *from, size_t len);
+
+#endif /* __ASM_AVR32_STRING_H */
diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h
new file mode 100644
index 000000000..9a8e9d520
--- /dev/null
+++ b/arch/avr32/include/asm/switch_to.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_SWITCH_TO_H
+#define __ASM_AVR32_SWITCH_TO_H
+
+/*
+ * Help PathFinder and other Nexus-compliant debuggers keep track of
+ * the current PID by emitting an Ownership Trace Message each time we
+ * switch task.
+ */
+#ifdef CONFIG_OWNERSHIP_TRACE
+#include <asm/ocd.h>
+#define finish_arch_switch(prev) \
+ do { \
+ ocd_write(PID, prev->pid); \
+ ocd_write(PID, current->pid); \
+	} while (0)
+#endif
+
+/*
+ * switch_to(prev, next, last) should switch from task `prev' to task
+ * `next'. `prev' will never be the same as `next'.
+ *
+ * We just delegate everything to the __switch_to assembly function,
+ * which is implemented in arch/avr32/kernel/switch_to.S
+ *
+ * mb() tells GCC not to cache `current' across this call.
+ */
+struct cpu_context;
+struct task_struct;
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct cpu_context *,
+ struct cpu_context *);
+#define switch_to(prev, next, last) \
+ do { \
+ last = __switch_to(prev, &prev->thread.cpu_context + 1, \
+ &next->thread.cpu_context); \
+ } while (0)
+
+#endif /* __ASM_AVR32_SWITCH_TO_H */
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
new file mode 100644
index 000000000..244f2acab
--- /dev/null
+++ b/arch/avr32/include/asm/syscalls.h
@@ -0,0 +1,21 @@
+/*
+ * syscalls.h - Linux syscall interfaces (arch-specific)
+ *
+ * Copyright (c) 2008 Jaswinder Singh
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _ASM_AVR32_SYSCALLS_H
+#define _ASM_AVR32_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+
+/* mm/cache.c */
+asmlinkage int sys_cacheflush(int, void __user *, size_t);
+
+#endif /* _ASM_AVR32_SYSCALLS_H */
diff --git a/arch/avr32/include/asm/sysreg.h b/arch/avr32/include/asm/sysreg.h
new file mode 100644
index 000000000..d4e095017
--- /dev/null
+++ b/arch/avr32/include/asm/sysreg.h
@@ -0,0 +1,291 @@
+/*
+ * AVR32 System Registers
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_SYSREG_H
+#define __ASM_AVR32_SYSREG_H
+
+/* sysreg register offsets */
+#define SYSREG_SR 0x0000
+#define SYSREG_EVBA 0x0004
+#define SYSREG_ACBA 0x0008
+#define SYSREG_CPUCR 0x000c
+#define SYSREG_ECR 0x0010
+#define SYSREG_RSR_SUP 0x0014
+#define SYSREG_RSR_INT0 0x0018
+#define SYSREG_RSR_INT1 0x001c
+#define SYSREG_RSR_INT2 0x0020
+#define SYSREG_RSR_INT3 0x0024
+#define SYSREG_RSR_EX 0x0028
+#define SYSREG_RSR_NMI 0x002c
+#define SYSREG_RSR_DBG 0x0030
+#define SYSREG_RAR_SUP 0x0034
+#define SYSREG_RAR_INT0 0x0038
+#define SYSREG_RAR_INT1 0x003c
+#define SYSREG_RAR_INT2 0x0040
+#define SYSREG_RAR_INT3 0x0044
+#define SYSREG_RAR_EX 0x0048
+#define SYSREG_RAR_NMI 0x004c
+#define SYSREG_RAR_DBG 0x0050
+#define SYSREG_JECR 0x0054
+#define SYSREG_JOSP 0x0058
+#define SYSREG_JAVA_LV0 0x005c
+#define SYSREG_JAVA_LV1 0x0060
+#define SYSREG_JAVA_LV2 0x0064
+#define SYSREG_JAVA_LV3 0x0068
+#define SYSREG_JAVA_LV4 0x006c
+#define SYSREG_JAVA_LV5 0x0070
+#define SYSREG_JAVA_LV6 0x0074
+#define SYSREG_JAVA_LV7 0x0078
+#define SYSREG_JTBA 0x007c
+#define SYSREG_JBCR 0x0080
+#define SYSREG_CONFIG0 0x0100
+#define SYSREG_CONFIG1 0x0104
+#define SYSREG_COUNT 0x0108
+#define SYSREG_COMPARE 0x010c
+#define SYSREG_TLBEHI 0x0110
+#define SYSREG_TLBELO 0x0114
+#define SYSREG_PTBR 0x0118
+#define SYSREG_TLBEAR 0x011c
+#define SYSREG_MMUCR 0x0120
+#define SYSREG_TLBARLO 0x0124
+#define SYSREG_TLBARHI 0x0128
+#define SYSREG_PCCNT 0x012c
+#define SYSREG_PCNT0 0x0130
+#define SYSREG_PCNT1 0x0134
+#define SYSREG_PCCR 0x0138
+#define SYSREG_BEAR 0x013c
+#define SYSREG_SABAL 0x0300
+#define SYSREG_SABAH 0x0304
+#define SYSREG_SABD 0x0308
+
+/* Bitfields in SR */
+#define SYSREG_SR_C_OFFSET 0
+#define SYSREG_SR_C_SIZE 1
+#define SYSREG_Z_OFFSET 1
+#define SYSREG_Z_SIZE 1
+#define SYSREG_SR_N_OFFSET 2
+#define SYSREG_SR_N_SIZE 1
+#define SYSREG_SR_V_OFFSET 3
+#define SYSREG_SR_V_SIZE 1
+#define SYSREG_Q_OFFSET 4
+#define SYSREG_Q_SIZE 1
+#define SYSREG_L_OFFSET 5
+#define SYSREG_L_SIZE 1
+#define SYSREG_T_OFFSET 14
+#define SYSREG_T_SIZE 1
+#define SYSREG_SR_R_OFFSET 15
+#define SYSREG_SR_R_SIZE 1
+#define SYSREG_GM_OFFSET 16
+#define SYSREG_GM_SIZE 1
+#define SYSREG_I0M_OFFSET 17
+#define SYSREG_I0M_SIZE 1
+#define SYSREG_I1M_OFFSET 18
+#define SYSREG_I1M_SIZE 1
+#define SYSREG_I2M_OFFSET 19
+#define SYSREG_I2M_SIZE 1
+#define SYSREG_I3M_OFFSET 20
+#define SYSREG_I3M_SIZE 1
+#define SYSREG_EM_OFFSET 21
+#define SYSREG_EM_SIZE 1
+#define SYSREG_MODE_OFFSET 22
+#define SYSREG_MODE_SIZE 3
+#define SYSREG_M0_OFFSET 22
+#define SYSREG_M0_SIZE 1
+#define SYSREG_M1_OFFSET 23
+#define SYSREG_M1_SIZE 1
+#define SYSREG_M2_OFFSET 24
+#define SYSREG_M2_SIZE 1
+#define SYSREG_SR_D_OFFSET 26
+#define SYSREG_SR_D_SIZE 1
+#define SYSREG_DM_OFFSET 27
+#define SYSREG_DM_SIZE 1
+#define SYSREG_SR_J_OFFSET 28
+#define SYSREG_SR_J_SIZE 1
+#define SYSREG_H_OFFSET 29
+#define SYSREG_H_SIZE 1
+
+/* Bitfields in CPUCR */
+#define SYSREG_BI_OFFSET 0
+#define SYSREG_BI_SIZE 1
+#define SYSREG_BE_OFFSET 1
+#define SYSREG_BE_SIZE 1
+#define SYSREG_FE_OFFSET 2
+#define SYSREG_FE_SIZE 1
+#define SYSREG_RE_OFFSET 3
+#define SYSREG_RE_SIZE 1
+#define SYSREG_IBE_OFFSET 4
+#define SYSREG_IBE_SIZE 1
+#define SYSREG_IEE_OFFSET 5
+#define SYSREG_IEE_SIZE 1
+
+/* Bitfields in CONFIG0 */
+#define SYSREG_CONFIG0_R_OFFSET 0
+#define SYSREG_CONFIG0_R_SIZE 1
+#define SYSREG_CONFIG0_D_OFFSET 1
+#define SYSREG_CONFIG0_D_SIZE 1
+#define SYSREG_CONFIG0_S_OFFSET 2
+#define SYSREG_CONFIG0_S_SIZE 1
+#define SYSREG_CONFIG0_O_OFFSET 3
+#define SYSREG_CONFIG0_O_SIZE 1
+#define SYSREG_CONFIG0_P_OFFSET 4
+#define SYSREG_CONFIG0_P_SIZE 1
+#define SYSREG_CONFIG0_J_OFFSET 5
+#define SYSREG_CONFIG0_J_SIZE 1
+#define SYSREG_CONFIG0_F_OFFSET 6
+#define SYSREG_CONFIG0_F_SIZE 1
+#define SYSREG_MMUT_OFFSET 7
+#define SYSREG_MMUT_SIZE 3
+#define SYSREG_AR_OFFSET 10
+#define SYSREG_AR_SIZE 3
+#define SYSREG_AT_OFFSET 13
+#define SYSREG_AT_SIZE 3
+#define SYSREG_PROCESSORREVISION_OFFSET 16
+#define SYSREG_PROCESSORREVISION_SIZE 8
+#define SYSREG_PROCESSORID_OFFSET 24
+#define SYSREG_PROCESSORID_SIZE 8
+
+/* Bitfields in CONFIG1 */
+#define SYSREG_DASS_OFFSET 0
+#define SYSREG_DASS_SIZE 3
+#define SYSREG_DLSZ_OFFSET 3
+#define SYSREG_DLSZ_SIZE 3
+#define SYSREG_DSET_OFFSET 6
+#define SYSREG_DSET_SIZE 4
+#define SYSREG_IASS_OFFSET 10
+#define SYSREG_IASS_SIZE 3
+#define SYSREG_ILSZ_OFFSET 13
+#define SYSREG_ILSZ_SIZE 3
+#define SYSREG_ISET_OFFSET 16
+#define SYSREG_ISET_SIZE 4
+#define SYSREG_DMMUSZ_OFFSET 20
+#define SYSREG_DMMUSZ_SIZE 6
+#define SYSREG_IMMUSZ_OFFSET 26
+#define SYSREG_IMMUSZ_SIZE 6
+
+/* Bitfields in TLBEHI */
+#define SYSREG_ASID_OFFSET 0
+#define SYSREG_ASID_SIZE 8
+#define SYSREG_TLBEHI_I_OFFSET 8
+#define SYSREG_TLBEHI_I_SIZE 1
+#define SYSREG_TLBEHI_V_OFFSET 9
+#define SYSREG_TLBEHI_V_SIZE 1
+#define SYSREG_VPN_OFFSET 10
+#define SYSREG_VPN_SIZE 22
+
+/* Bitfields in TLBELO */
+#define SYSREG_W_OFFSET 0
+#define SYSREG_W_SIZE 1
+#define SYSREG_TLBELO_D_OFFSET 1
+#define SYSREG_TLBELO_D_SIZE 1
+#define SYSREG_SZ_OFFSET 2
+#define SYSREG_SZ_SIZE 2
+#define SYSREG_AP_OFFSET 4
+#define SYSREG_AP_SIZE 3
+#define SYSREG_B_OFFSET 7
+#define SYSREG_B_SIZE 1
+#define SYSREG_G_OFFSET 8
+#define SYSREG_G_SIZE 1
+#define SYSREG_TLBELO_C_OFFSET 9
+#define SYSREG_TLBELO_C_SIZE 1
+#define SYSREG_PFN_OFFSET 10
+#define SYSREG_PFN_SIZE 22
+
+/* Bitfields in MMUCR */
+#define SYSREG_E_OFFSET 0
+#define SYSREG_E_SIZE 1
+#define SYSREG_M_OFFSET 1
+#define SYSREG_M_SIZE 1
+#define SYSREG_MMUCR_I_OFFSET 2
+#define SYSREG_MMUCR_I_SIZE 1
+#define SYSREG_MMUCR_N_OFFSET 3
+#define SYSREG_MMUCR_N_SIZE 1
+#define SYSREG_MMUCR_S_OFFSET 4
+#define SYSREG_MMUCR_S_SIZE 1
+#define SYSREG_DLA_OFFSET 8
+#define SYSREG_DLA_SIZE 6
+#define SYSREG_DRP_OFFSET 14
+#define SYSREG_DRP_SIZE 6
+#define SYSREG_ILA_OFFSET 20
+#define SYSREG_ILA_SIZE 6
+#define SYSREG_IRP_OFFSET 26
+#define SYSREG_IRP_SIZE 6
+
+/* Bitfields in PCCR */
+#define SYSREG_PCCR_E_OFFSET 0
+#define SYSREG_PCCR_E_SIZE 1
+#define SYSREG_PCCR_R_OFFSET 1
+#define SYSREG_PCCR_R_SIZE 1
+#define SYSREG_PCCR_C_OFFSET 2
+#define SYSREG_PCCR_C_SIZE 1
+#define SYSREG_PCCR_S_OFFSET 3
+#define SYSREG_PCCR_S_SIZE 1
+#define SYSREG_IEC_OFFSET 4
+#define SYSREG_IEC_SIZE 1
+#define SYSREG_IE0_OFFSET 5
+#define SYSREG_IE0_SIZE 1
+#define SYSREG_IE1_OFFSET 6
+#define SYSREG_IE1_SIZE 1
+#define SYSREG_FC_OFFSET 8
+#define SYSREG_FC_SIZE 1
+#define SYSREG_F0_OFFSET 9
+#define SYSREG_F0_SIZE 1
+#define SYSREG_F1_OFFSET 10
+#define SYSREG_F1_SIZE 1
+#define SYSREG_CONF0_OFFSET 12
+#define SYSREG_CONF0_SIZE 6
+#define SYSREG_CONF1_OFFSET 18
+#define SYSREG_CONF1_SIZE 6
+
+/* Constants for ECR */
+#define ECR_UNRECOVERABLE 0
+#define ECR_TLB_MULTIPLE 1
+#define ECR_BUS_ERROR_WRITE 2
+#define ECR_BUS_ERROR_READ 3
+#define ECR_NMI 4
+#define ECR_ADDR_ALIGN_X 5
+#define ECR_PROTECTION_X 6
+#define ECR_DEBUG 7
+#define ECR_ILLEGAL_OPCODE 8
+#define ECR_UNIMPL_INSTRUCTION 9
+#define ECR_PRIVILEGE_VIOLATION 10
+#define ECR_FPE 11
+#define ECR_COPROC_ABSENT 12
+#define ECR_ADDR_ALIGN_R 13
+#define ECR_ADDR_ALIGN_W 14
+#define ECR_PROTECTION_R 15
+#define ECR_PROTECTION_W 16
+#define ECR_DTLB_MODIFIED 17
+#define ECR_TLB_MISS_X 20
+#define ECR_TLB_MISS_R 24
+#define ECR_TLB_MISS_W 28
+
+/* Bit manipulation macros */
+#define SYSREG_BIT(name) \
+ (1 << SYSREG_##name##_OFFSET)
+#define SYSREG_BF(name,value) \
+ (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) \
+ << SYSREG_##name##_OFFSET)
+#define SYSREG_BFEXT(name,value)\
+ (((value) >> SYSREG_##name##_OFFSET) \
+ & ((1 << SYSREG_##name##_SIZE) - 1))
+#define SYSREG_BFINS(name,value,old) \
+ (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) \
+ << SYSREG_##name##_OFFSET)) \
+ | SYSREG_BF(name,value))
+
+/* Register access macros */
+#ifdef __CHECKER__
+extern unsigned long __builtin_mfsr(unsigned long reg);
+extern void __builtin_mtsr(unsigned long reg, unsigned long value);
+#endif
+
+#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg)
+#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value)
+
+#endif /* __ASM_AVR32_SYSREG_H */
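
A minimal sketch of the bitfield macros in use (the function name is
hypothetical): SYSREG_BFEXT() extracts a named field from a register value,
and SYSREG_BFINS() replaces one while preserving all other bits.

static void set_dtlb_replacement_pointer(unsigned int entry)
{
	unsigned long mmucr = sysreg_read(MMUCR);

	/* rewrite only the DRP field of MMUCR, leaving the rest intact */
	mmucr = SYSREG_BFINS(DRP, entry, mmucr);
	sysreg_write(MMUCR, mmucr);
}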
diff --git a/arch/avr32/include/asm/termios.h b/arch/avr32/include/asm/termios.h
new file mode 100644
index 000000000..9d594376d
--- /dev/null
+++ b/arch/avr32/include/asm/termios.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_TERMIOS_H
+#define __ASM_AVR32_TERMIOS_H
+
+#include <uapi/asm/termios.h>
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+#include <asm-generic/termios-base.h>
+
+#endif /* __ASM_AVR32_TERMIOS_H */
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
new file mode 100644
index 000000000..d4d307954
--- /dev/null
+++ b/arch/avr32/include/asm/thread_info.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_THREAD_INFO_H
+#define __ASM_AVR32_THREAD_INFO_H
+
+#include <asm/page.h>
+
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+struct task_struct;
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ __u32 cpu;
+ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
+ __u32 rar_saved; /* return address... */
+ __u32 rsr_saved; /* ...and status register
+ saved by debug handler
+ when setting up
+ trampoline */
+ __u8 supervisor_stack[0];
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/*
+ * Get the thread information struct from C.
+ * We do the usual trick and use the lower end of the stack for this
+ */
+static inline struct thread_info *current_thread_info(void)
+{
+ unsigned long addr = ~(THREAD_SIZE - 1);
+
+ asm("and %0, sp" : "=r"(addr) : "0"(addr));
+ return (struct thread_info *)addr;
+}
+
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_BREAKPOINT 4 /* enter monitor mode on return */
+#define TIF_SINGLE_STEP 5 /* single step in progress */
+#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
+#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
+#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
+#define TIF_DEBUG 30 /* debugging enabled */
+#define TIF_USERSPACE 31 /* true if FS sets userspace */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_BREAKPOINT (1 << TIF_BREAKPOINT)
+#define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+
+/* Note: The masks below must never span more than 16 bits! */
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK \
+ (_TIF_SIGPENDING \
+ | _TIF_NOTIFY_RESUME \
+ | _TIF_NEED_RESCHED \
+ | _TIF_BREAKPOINT)
+
+/* work to do on any return to userspace */
+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE)
+/* work to do on return from debug mode */
+#define _TIF_DBGWORK_MASK (_TIF_WORK_MASK & ~_TIF_BREAKPOINT)
+
+#endif /* __ASM_AVR32_THREAD_INFO_H */
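
Since the kernel stack is THREAD_SIZE bytes and THREAD_SIZE-aligned, with
struct thread_info at its low end, the same lookup can be written in plain C
from any in-stack address (the asm version above simply reads sp directly);
the helper name is hypothetical:

static inline struct thread_info *thread_info_of(unsigned long sp)
{
	/* round down to the base of the THREAD_SIZE-aligned stack */
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}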
diff --git a/arch/avr32/include/asm/timex.h b/arch/avr32/include/asm/timex.h
new file mode 100644
index 000000000..187dcf38b
--- /dev/null
+++ b/arch/avr32/include/asm/timex.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_TIMEX_H
+#define __ASM_AVR32_TIMEX_H
+
+/*
+ * This is the frequency of the timer used for Linux's timer interrupt.
+ * The value should be defined as accurately as possible, or under
+ * certain circumstances Linux timekeeping might become inaccurate or
+ * fail.
+ *
+ * For many systems the exact clock rate of the timer isn't known, but
+ * due to the way this value is used we can get away with a wrong value
+ * as long as it is:
+ *
+ *  - a multiple of HZ
+ *  - a divisor of the actual rate
+ *
+ * 500000 is one such good cheat value.
+ *
+ * The obscure number 1193182 is the same as used by the original i8254
+ * timer in legacy PC hardware; that chip is never found in AVR32
+ * systems.
+ */
+#define CLOCK_TICK_RATE 500000 /* Underlying HZ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+ return 0;
+}
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif /* __ASM_AVR32_TIMEX_H */
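
To make the two constraints above concrete: with HZ = 100 and a timer that
actually ticks at 1 MHz (figures chosen purely for illustration),
CLOCK_TICK_RATE = 500000 qualifies because 500000 = 5000 * HZ and
1000000 = 2 * 500000, whereas a value such as 300000 would fail the
divisor test.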
diff --git a/arch/avr32/include/asm/tlb.h b/arch/avr32/include/asm/tlb.h
new file mode 100644
index 000000000..5c55f9ce7
--- /dev/null
+++ b/arch/avr32/include/asm/tlb.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_TLB_H
+#define __ASM_AVR32_TLB_H
+
+#define tlb_start_vma(tlb, vma) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end)
+
+#define tlb_end_vma(tlb, vma) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+/*
+ * Flush whole TLB for MM
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+/*
+ * For debugging purposes
+ */
+extern void show_dtlb_entry(unsigned int index);
+extern void dump_dtlb(void);
+
+#endif /* __ASM_AVR32_TLB_H */
diff --git a/arch/avr32/include/asm/tlbflush.h b/arch/avr32/include/asm/tlbflush.h
new file mode 100644
index 000000000..bf90a786f
--- /dev/null
+++ b/arch/avr32/include/asm/tlbflush.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_TLBFLUSH_H
+#define __ASM_AVR32_TLBFLUSH_H
+
+#include <asm/mmu.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLBs
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void flush_tlb(void);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#endif /* __ASM_AVR32_TLBFLUSH_H */
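
A minimal sketch of the range variant in use, assuming hypothetical code that
has just rewritten the PTEs covering part of a user mapping:

static void finish_remap(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	/* ... page tables for [start, end) were modified above ... */

	/* drop the now-stale translations for just this range */
	flush_tlb_range(vma, start, end);
}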
diff --git a/arch/avr32/include/asm/traps.h b/arch/avr32/include/asm/traps.h
new file mode 100644
index 000000000..6a8fb944f
--- /dev/null
+++ b/arch/avr32/include/asm/traps.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_TRAPS_H
+#define __ASM_AVR32_TRAPS_H
+
+#include <linux/list.h>
+
+struct undef_hook {
+ struct list_head node;
+ u32 insn_mask;
+ u32 insn_val;
+ int (*fn)(struct pt_regs *regs, u32 insn);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+void unregister_undef_hook(struct undef_hook *hook);
+
+#endif /* __ASM_AVR32_TRAPS_H */
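
A minimal sketch of the hook API; the handler, mask and opcode below are made
up for illustration, and the code assumes the conventional 0-means-handled
return and a 2-byte (compact) instruction encoding:

static int emulate_widget_insn(struct pt_regs *regs, u32 insn)
{
	/* ... emulate the instruction here ... */
	regs->pc += 2;			/* skip the faulting compact insn */
	return 0;			/* handled */
}

static struct undef_hook widget_hook = {
	.insn_mask	= 0xffff0000,	/* hypothetical encoding */
	.insn_val	= 0xe1600000,
	.fn		= emulate_widget_insn,
};

/* register_undef_hook(&widget_hook) would be called from init code. */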
diff --git a/arch/avr32/include/asm/types.h b/arch/avr32/include/asm/types.h
new file mode 100644
index 000000000..593240580
--- /dev/null
+++ b/arch/avr32/include/asm/types.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_TYPES_H
+#define __ASM_AVR32_TYPES_H
+
+#include <uapi/asm/types.h>
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+
+#define BITS_PER_LONG 32
+
+#endif /* __ASM_AVR32_TYPES_H */
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
new file mode 100644
index 000000000..a46f7cf3e
--- /dev/null
+++ b/arch/avr32/include/asm/uaccess.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_UACCESS_H
+#define __ASM_AVR32_UACCESS_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+typedef struct {
+ unsigned int is_user_space;
+} mm_segment_t;
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed;
+ * with get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are misnamed.
+ */
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+#define segment_eq(a, b) ((a).is_user_space == (b).is_user_space)
+
+#define USER_ADDR_LIMIT 0x80000000
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(1)
+
+#define get_ds() (KERNEL_DS)
+
+static inline mm_segment_t get_fs(void)
+{
+ return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
+}
+
+static inline void set_fs(mm_segment_t s)
+{
+ if (s.is_user_space)
+ set_thread_flag(TIF_USERSPACE);
+ else
+ clear_thread_flag(TIF_USERSPACE);
+}
+
+/*
+ * Test whether a block of memory is a valid user space address range.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * We do the following checks:
+ *  1. Is the access from kernel space?
+ *  2. Is addr, size, or (addr + size) at or beyond USER_ADDR_LIMIT
+ *     (0x80000000)?
+ *
+ * If yes on the first check, access is granted.
+ * If yes on the second, access is denied.
+ */
+#define __range_ok(addr, size) \
+ (test_thread_flag(TIF_USERSPACE) \
+ && (((unsigned long)(addr) >= 0x80000000) \
+ || ((unsigned long)(size) > 0x80000000) \
+ || (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000)))
+
+#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
+
+/* Generic arbitrary sized copy. Return the number of bytes NOT copied */
+extern __kernel_size_t __copy_user(void *to, const void *from,
+ __kernel_size_t n);
+
+extern __kernel_size_t copy_to_user(void __user *to, const void *from,
+ __kernel_size_t n);
+extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+ __kernel_size_t n);
+
+static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
+ __kernel_size_t n)
+{
+ return __copy_user((void __force *)to, from, n);
+}
+static inline __kernel_size_t __copy_from_user(void *to,
+ const void __user *from,
+ __kernel_size_t n)
+{
+ return __copy_user(to, (const void __force *)from, n);
+}
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+/*
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+ __put_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+ __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ unsigned long __gu_val = 0; \
+ int __gu_err = 0; \
+ \
+ switch (size) { \
+ case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \
+ case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \
+ case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \
+ default: __gu_err = __get_user_bad(); break; \
+ } \
+ \
+ x = (__force typeof(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ unsigned long __gu_val = 0; \
+ const typeof(*(ptr)) __user * __gu_addr = (ptr); \
+ int __gu_err = 0; \
+ \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) { \
+ switch (size) { \
+ case 1: \
+ __get_user_asm("ub", __gu_val, __gu_addr, \
+ __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("uh", __gu_val, __gu_addr, \
+ __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("w", __gu_val, __gu_addr, \
+ __gu_err); \
+ break; \
+ default: \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+ } else { \
+ __gu_err = -EFAULT; \
+ } \
+ x = (__force typeof(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_asm(suffix, __gu_val, ptr, __gu_err) \
+ asm volatile( \
+ "1: ld." suffix " %1, %3 \n" \
+ "2: \n" \
+ " .subsection 1 \n" \
+ "3: mov %0, %4 \n" \
+ " rjmp 2b \n" \
+ " .subsection 0 \n" \
+ " .section __ex_table, \"a\" \n" \
+ " .long 1b, 3b \n" \
+ " .previous \n" \
+ : "=r"(__gu_err), "=r"(__gu_val) \
+ : "0"(__gu_err), "m"(*(ptr)), "i"(-EFAULT))
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ typeof(*(ptr)) __pu_val; \
+ int __pu_err = 0; \
+ \
+ __pu_val = (x); \
+ switch (size) { \
+ case 1: __put_user_asm("b", ptr, __pu_val, __pu_err); break; \
+ case 2: __put_user_asm("h", ptr, __pu_val, __pu_err); break; \
+ case 4: __put_user_asm("w", ptr, __pu_val, __pu_err); break; \
+ case 8: __put_user_asm("d", ptr, __pu_val, __pu_err); break; \
+ default: __pu_err = __put_user_bad(); break; \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ typeof(*(ptr)) __pu_val; \
+ typeof(*(ptr)) __user *__pu_addr = (ptr); \
+ int __pu_err = 0; \
+ \
+ __pu_val = (x); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
+ switch (size) { \
+ case 1: \
+ __put_user_asm("b", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("h", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("w", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 8: \
+ __put_user_asm("d", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ } else { \
+ __pu_err = -EFAULT; \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_asm(suffix, ptr, __pu_val, __pu_err)	\
+	asm volatile(						\
+		"1:	st." suffix " %1, %3		\n"	\
+		"2:					\n"	\
+		"	.subsection 1			\n"	\
+		"3:	mov	%0, %4			\n"	\
+		"	rjmp	2b			\n"	\
+		"	.subsection 0			\n"	\
+		"	.section __ex_table, \"a\"	\n"	\
+		"	.long	1b, 3b			\n"	\
+		"	.previous			\n"	\
+		: "=r"(__pu_err), "=m"(*(ptr))			\
+		: "0"(__pu_err), "r"(__pu_val), "i"(-EFAULT))
+
+extern __kernel_size_t clear_user(void __user *addr, __kernel_size_t size);
+extern __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern long __strncpy_from_user(char *dst, const char __user *src, long count);
+
+extern long strnlen_user(const char __user *__s, long __n);
+extern long __strnlen_user(const char __user *__s, long __n);
+
+#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+#endif /* __ASM_AVR32_UACCESS_H */
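
A minimal sketch of the intended calling pattern, assuming a hypothetical
handler that receives a user pointer: scalars go through get_user()/put_user(),
blocks through copy_from_user(), and every return value is checked since any
access may fault:

static int read_user_config(void __user *uptr)
{
	struct { u32 flags; u32 len; } cfg;
	u32 version;

	if (get_user(version, (u32 __user *)uptr))
		return -EFAULT;	/* fixup path ran; version is zeroed */

	if (copy_from_user(&cfg, uptr, sizeof(cfg)))
		return -EFAULT;	/* nonzero = bytes that were NOT copied */

	/* use version and cfg here */
	return 0;
}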
diff --git a/arch/avr32/include/asm/ucontext.h b/arch/avr32/include/asm/ucontext.h
new file mode 100644
index 000000000..ac7259c2a
--- /dev/null
+++ b/arch/avr32/include/asm/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_AVR32_UCONTEXT_H
+#define __ASM_AVR32_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext * uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask;
+};
+
+#endif /* __ASM_AVR32_UCONTEXT_H */
diff --git a/arch/avr32/include/asm/unaligned.h b/arch/avr32/include/asm/unaligned.h
new file mode 100644
index 000000000..041877290
--- /dev/null
+++ b/arch/avr32/include/asm/unaligned.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_AVR32_UNALIGNED_H
+#define _ASM_AVR32_UNALIGNED_H
+
+/*
+ * AVR32 can handle some unaligned accesses, depending on the
+ * implementation. The AVR32 AP implementation can handle unaligned
+ * words, but halfwords must be halfword-aligned, and doublewords must
+ * be word-aligned.
+ *
+ * However, swapped word loads must be word-aligned so we can't
+ * optimize word loads in general.
+ */
+
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#endif /* _ASM_AVR32_UNALIGNED_H */
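
A minimal sketch, assuming a packed network-style buffer (the function name is
hypothetical): get_unaligned maps to the big-endian helpers here, so a 32-bit
load from an odd offset is performed safely instead of through a
potentially-faulting word access.

static u32 frame_length(const u8 *pkt)
{
	/* pkt + 1 is not word-aligned; still safe via get_unaligned */
	return get_unaligned((const u32 *)(pkt + 1));
}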
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
new file mode 100644
index 000000000..2011bee3f
--- /dev/null
+++ b/arch/avr32/include/asm/unistd.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_UNISTD_H
+#define __ASM_AVR32_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls 321
+
+/* Old stuff */
+#define __IGNORE_uselib
+#define __IGNORE_mmap
+
+/* NUMA stuff */
+#define __IGNORE_mbind
+#define __IGNORE_get_mempolicy
+#define __IGNORE_set_mempolicy
+#define __IGNORE_migrate_pages
+#define __IGNORE_move_pages
+
+/* SMP stuff */
+#define __IGNORE_getcpu
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
+
+#endif /* __ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/include/asm/user.h b/arch/avr32/include/asm/user.h
new file mode 100644
index 000000000..7e9152f81
--- /dev/null
+++ b/arch/avr32/include/asm/user.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note: We may not need these definitions for AVR32, as we don't
+ * support a.out.
+ */
+#ifndef __ASM_AVR32_USER_H
+#define __ASM_AVR32_USER_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd). The file contents are as follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused, we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ */
+
+struct user_fpu_struct {
+ /* We have no FPU (yet) */
+};
+
+struct user {
+ struct pt_regs regs; /* entire machine state */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* __ASM_AVR32_USER_H */