author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
commit    57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree      5e910f0e82173f4ef4f51111366a3f1299037a7b /arch/arc/include
Initial import
Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/Kbuild  52
-rw-r--r--  arch/arc/include/asm/arcregs.h  341
-rw-r--r--  arch/arc/include/asm/asm-offsets.h  9
-rw-r--r--  arch/arc/include/asm/atomic.h  191
-rw-r--r--  arch/arc/include/asm/bitops.h  533
-rw-r--r--  arch/arc/include/asm/bug.h  35
-rw-r--r--  arch/arc/include/asm/cache.h  85
-rw-r--r--  arch/arc/include/asm/cacheflush.h  120
-rw-r--r--  arch/arc/include/asm/checksum.h  101
-rw-r--r--  arch/arc/include/asm/clk.h  22
-rw-r--r--  arch/arc/include/asm/cmpxchg.h  161
-rw-r--r--  arch/arc/include/asm/current.h  28
-rw-r--r--  arch/arc/include/asm/delay.h  67
-rw-r--r--  arch/arc/include/asm/disasm.h  116
-rw-r--r--  arch/arc/include/asm/dma-mapping.h  221
-rw-r--r--  arch/arc/include/asm/dma.h  14
-rw-r--r--  arch/arc/include/asm/elf.h  75
-rw-r--r--  arch/arc/include/asm/entry.h  648
-rw-r--r--  arch/arc/include/asm/exec.h  15
-rw-r--r--  arch/arc/include/asm/futex.h  151
-rw-r--r--  arch/arc/include/asm/io.h  107
-rw-r--r--  arch/arc/include/asm/irq.h  28
-rw-r--r--  arch/arc/include/asm/irqflags.h  179
-rw-r--r--  arch/arc/include/asm/kdebug.h  19
-rw-r--r--  arch/arc/include/asm/kgdb.h  63
-rw-r--r--  arch/arc/include/asm/kprobes.h  60
-rw-r--r--  arch/arc/include/asm/linkage.h  51
-rw-r--r--  arch/arc/include/asm/mach_desc.h  76
-rw-r--r--  arch/arc/include/asm/mmu.h  66
-rw-r--r--  arch/arc/include/asm/mmu_context.h  177
-rw-r--r--  arch/arc/include/asm/module.h  28
-rw-r--r--  arch/arc/include/asm/mutex.h  18
-rw-r--r--  arch/arc/include/asm/page.h  109
-rw-r--r--  arch/arc/include/asm/perf_event.h  217
-rw-r--r--  arch/arc/include/asm/pgalloc.h  139
-rw-r--r--  arch/arc/include/asm/pgtable.h  388
-rw-r--r--  arch/arc/include/asm/processor.h  135
-rw-r--r--  arch/arc/include/asm/ptrace.h  107
-rw-r--r--  arch/arc/include/asm/sections.h  16
-rw-r--r--  arch/arc/include/asm/segment.h  24
-rw-r--r--  arch/arc/include/asm/serial.h  22
-rw-r--r--  arch/arc/include/asm/setup.h  36
-rw-r--r--  arch/arc/include/asm/shmparam.h  18
-rw-r--r--  arch/arc/include/asm/smp.h  138
-rw-r--r--  arch/arc/include/asm/spinlock.h  183
-rw-r--r--  arch/arc/include/asm/spinlock_types.h  35
-rw-r--r--  arch/arc/include/asm/stacktrace.h  37
-rw-r--r--  arch/arc/include/asm/string.h  37
-rw-r--r--  arch/arc/include/asm/switch_to.h  41
-rw-r--r--  arch/arc/include/asm/syscall.h  71
-rw-r--r--  arch/arc/include/asm/syscalls.h  23
-rw-r--r--  arch/arc/include/asm/thread_info.h  109
-rw-r--r--  arch/arc/include/asm/timex.h  18
-rw-r--r--  arch/arc/include/asm/tlb-mmu1.h  104
-rw-r--r--  arch/arc/include/asm/tlb.h  47
-rw-r--r--  arch/arc/include/asm/tlbflush.h  35
-rw-r--r--  arch/arc/include/asm/uaccess.h  751
-rw-r--r--  arch/arc/include/asm/unaligned.h  30
-rw-r--r--  arch/arc/include/asm/unwind.h  163
-rw-r--r--  arch/arc/include/uapi/asm/Kbuild  5
-rw-r--r--  arch/arc/include/uapi/asm/byteorder.h  18
-rw-r--r--  arch/arc/include/uapi/asm/cachectl.h  28
-rw-r--r--  arch/arc/include/uapi/asm/elf.h  26
-rw-r--r--  arch/arc/include/uapi/asm/page.h  39
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h  52
-rw-r--r--  arch/arc/include/uapi/asm/setup.h  6
-rw-r--r--  arch/arc/include/uapi/asm/sigcontext.h  22
-rw-r--r--  arch/arc/include/uapi/asm/signal.h  27
-rw-r--r--  arch/arc/include/uapi/asm/swab.h  98
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h  45
70 files changed, 7256 insertions, 0 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 000000000..be0c39e76
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,52 @@
+generic-y += auxvec.h
+generic-y += barrier.h
+generic-y += bitsperlong.h
+generic-y += bugs.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += preempt.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 000000000..e2b1b1211
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_CRC_BCR 0x62
+#define ARC_REG_VECBASE_BCR 0x68
+#define ARC_REG_PERIBASE_BCR 0x69
+#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR 0x75
+#define ARC_REG_AP_BCR 0x76
+#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_XY_MEM_BCR 0x79
+#define ARC_REG_MAC_BCR 0x7a
+#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_SWAP_BCR 0x7c
+#define ARC_REG_NORM_BCR 0x7d
+#define ARC_REG_MIXMAX_BCR 0x7e
+#define ARC_REG_BARREL_BCR 0x7f
+#define ARC_REG_D_UNCACH_BCR 0x6A
+#define ARC_REG_BPU_BCR 0xc0
+#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_RTT_BCR 0xF2
+#define ARC_REG_SMART_BCR 0xFF
+
+/* status32 Bits Positions */
+#define STATUS_AE_BIT 5 /* Exception active */
+#define STATUS_DE_BIT 6 /* PC is in delay slot */
+#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_L_BIT 12 /* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_L_MASK (1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_V_MEM_ERR 0x01
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x20
+#define ECR_V_ITLB_MISS 0x21
+#define ECR_V_DTLB_MISS 0x22
+#define ECR_V_PROTV 0x23
+#define ECR_V_TRAP 0x25
+
+/* DTLB Miss and Protection Violation Cause Codes */
+
+#define ECR_C_PROTV_INST_FETCH 0x00
+#define ECR_C_PROTV_LOAD 0x01
+#define ECR_C_PROTV_STORE 0x02
+#define ECR_C_PROTV_XCHG 0x03
+#define ECR_C_PROTV_MISALIG_DATA 0x04
+
+#define ECR_C_BIT_PROTV_MISALIG_DATA 10
+
+/* Machine Check Cause Code Values */
+#define ECR_C_MCHK_DUP_TLB 0x01
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS 8
+#define ECR_C_BIT_DTLB_ST_MISS 9
+
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1 0x0031abcd
+#define event_IRQ2 0x0032abcd
+
+/* Auxiliary registers */
+#define AUX_IDENTITY 4
+#define AUX_INTR_VEC_BASE 0x25
+
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT 0x300
+#define ARC_AUX_DPFP_1L 0x301
+#define ARC_AUX_DPFP_1H 0x302
+#define ARC_AUX_DPFP_2L 0x303
+#define ARC_AUX_DPFP_2H 0x304
+#define ARC_AUX_DPFP_STAT 0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ * Inline ASM macros to read/write AUX Regs
+ * Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg) __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val) \
+ __builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " lr %0, [%1]" \
+ : "=r"(__ret) \
+ : "i"(reg)); \
+ __ret; \
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ * write_aux_reg(0x69, some_val);
+ * This generates the tightest code.
+ */
+#define write_aux_reg(reg_imm, val) \
+({ \
+ __asm__ __volatile__( \
+ " sr %0, [%1] \n" \
+ : \
+ : "ir"(val), "i"(reg_imm)); \
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ *     reg_num = 0x69
+ *     write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ * memory to a register, hence it is not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val) \
+({ \
+ unsigned int tmp; \
+ \
+ __asm__ __volatile__( \
+ " ld %0, [%2] \n\t" \
+ " sr %1, [%0] \n\t" \
+ : "=&r"(tmp) \
+ : "r"(val), "memory"(&reg_in_var)); \
+})
+
+#endif
+
+#define READ_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ tmp = read_aux_reg(reg); \
+ if (sizeof(tmp) == sizeof(into)) { \
+ into = *((typeof(into) *)&tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+#define WRITE_AUX(reg, into) \
+{ \
+ unsigned int tmp; \
+ if (sizeof(tmp) == sizeof(into)) { \
+ tmp = (*(unsigned int *)&(into)); \
+ write_aux_reg(reg, tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+/* Helpers */
+#define TO_KB(bytes) ((bytes) >> 10)
+#define TO_MB(bytes) (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+ unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_isa {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad1:23, atomic1:1, ver:8;
+#else
+ unsigned int ver:8, atomic1:1, pad1:23;
+#endif
+};
+
+struct bcr_mpy {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
+#else
+ unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+ unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+ unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int addr:24, ver:8;
+#else
+ unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res:21, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* ARCompact: Both SP and DP FPU BCRs have same format */
+struct bcr_fp_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int fast:1, ver:8;
+#else
+ unsigned int ver:8, fast:1;
+#endif
+};
+
+struct bcr_timer {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:15, rtsc:1, pad1:6, t1:1, t0:1, ver:8;
+#else
+ unsigned int ver:8, t0:1, t1:1, pad1:6, rtsc:1, pad2:15;
+#endif
+};
+
+struct bcr_bpu_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8;
+#else
+ unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19;
+#endif
+};
+
+struct bcr_generic {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:24, ver:8;
+#else
+ unsigned int ver:8, pad:24;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+ unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
+};
+
+struct cpuinfo_arc_bpu {
+ unsigned int ver, full, num_cache, num_pred;
+};
+
+struct cpuinfo_arc_ccm {
+ unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+ struct cpuinfo_arc_cache icache, dcache;
+ struct cpuinfo_arc_mmu mmu;
+ struct cpuinfo_arc_bpu bpu;
+ struct bcr_identity core;
+ struct bcr_isa isa;
+ struct bcr_timer timers;
+ unsigned int vec_base;
+ unsigned int uncached_base;
+ struct cpuinfo_arc_ccm iccm, dccm;
+ struct {
+ unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+ fpu_sp:1, fpu_dp:1, pad2:6,
+ debug:1, ap:1, smart:1, rtt:1, pad3:4,
+ pad4:8;
+ } extn;
+ struct bcr_mpy extn_mpy;
+ struct bcr_extn_xymem extn_xymem;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
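
The bcr_* structs above rely on each being exactly 32 bits wide, so READ_BCR() can type-pun a raw aux-register word onto the bitfield layout. A minimal little-endian user-space sketch of that punning, outside the patch; the raw value 0x00c80234 is invented for illustration (on hardware it would come from read_aux_reg(AUX_IDENTITY)):

#include <stdio.h>
#include <stdint.h>

struct bcr_identity {                   /* !CONFIG_CPU_BIG_ENDIAN layout from above */
        unsigned int family:8, cpu_id:8, chip_id:16;
};

int main(void)
{
        uint32_t raw = 0x00c80234;      /* pretend this is the IDENTITY aux register */
        struct bcr_identity id;

        /* same idea as READ_BCR(): reinterpret the 32-bit word as the struct,
         * relying on GCC's little-endian bitfield ordering as the kernel does */
        _Static_assert(sizeof(id) == sizeof(raw), "BCR structs must stay 32 bits");
        id = *(struct bcr_identity *)&raw;

        printf("family=0x%x cpu_id=0x%x chip_id=0x%x\n",
               id.family, id.cpu_id, id.chip_id);       /* 0x34, 0x2, 0xc8 */
        return 0;
}
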
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 000000000..dad18768f
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 000000000..20b7dc179
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v) ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned int temp; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " scond %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
+ : "r"(&v->counter), "ir"(i) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned int temp; \
+ \
+ /* \
+ * Explicit full memory barrier needed before/after as \
+ * LLOCK/SCOND themselves don't provide any such semantics \
+ */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " scond %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(temp) \
+ : "r"(&v->counter), "ir"(i) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return temp; \
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization's sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence
+ *
+ * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter = i;
+ atomic_ops_unlock(flags);
+}
+
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ atomic_ops_lock(flags); \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned long temp; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ atomic_ops_unlock(flags); \
+ \
+ return temp; \
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OP(and, &=, and)
+
+#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ \
+ /* \
+ * Explicit full memory barrier needed before/after as \
+ * LLOCK/SCOND themselves don't provide any such semantics \
+ */ \
+ smp_mb(); \
+ \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+ c = old; \
+ \
+ smp_mb(); \
+ \
+ c; \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
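
To make the macro expansion above concrete: with !CONFIG_ARC_HAS_LLSC, ATOMIC_OP_RETURN(add, +=, add) generates a plain load/modify/store bracketed by atomic_ops_lock(). A user-space analogue (illustrative only, with a pthread mutex standing in for the kernel lock; my_atomic_t and my_atomic_add_return are made-up names):

#include <stdio.h>
#include <pthread.h>

typedef struct { int counter; } my_atomic_t;    /* stand-in for atomic_t */
static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

static int my_atomic_add_return(int i, my_atomic_t *v)
{
        int temp;

        pthread_mutex_lock(&ops_lock);          /* kernel: atomic_ops_lock(flags) */
        temp = v->counter;
        temp += i;                              /* what "c_op" expands to for "add" */
        v->counter = temp;
        pthread_mutex_unlock(&ops_lock);        /* kernel: atomic_ops_unlock(flags) */

        return temp;
}

int main(void)
{
        my_atomic_t v = { 40 };

        printf("%d\n", my_atomic_add_return(2, &v));    /* prints 42 */
        return 0;
}
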
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 000000000..624a9d048
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ /*
+ * ARC ISA micro-optimization:
+ *
+ * Instructions dealing with bitpos only consider lower 5 bits (0-31)
+ * e.g (x << 33) is handled like (x << 1) by ASL instruction
+ * (mem pointer still needs adjustment to point to next word)
+ *
+ * Hence the masking to clamp @nr arg can be elided in general.
+ *
+ * However if @nr is a constant (above assumed it in a register),
+ * and greater than 31, gcc can optimize away (x << 33) to 0,
+ * as overflow, given the 32-bit ISA. Thus masking needs to be done
+ * for constant @nr, but no code is generated due to const prop.
+ */
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bset %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bclr %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bxor %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+/*
+ * Semantically:
+ * Test the bit
+ * if clear
+ * set it and return 0 (old value)
+ * else
+ * return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ /*
+ * Explicit full memory barrier needed before/after as
+ * LLOCK/SCOND themselves don't provide any such semantics
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bset %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ smp_mb();
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bclr %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ smp_mb();
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bxor %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ smp_mb();
+
+ return (old & (1 << nr)) != 0;
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ * This avoids extra code being generated for pointer arithmetic, since
+ * gcc is "not sure" that the index is NOT -ve
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
+ * only consider bottom 5 bits of @nr, so NO need to mask them off.
+ * (GCC Quirk: however for constant @nr we still need to do the masking
+ * at compile time)
+ */
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ /*
+ * spin lock/unlock provide the needed smp_mb() before/after
+ */
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ unsigned long mask;
+
+ addr += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ mask = 1 << nr;
+
+ return ((mask & *addr) != 0);
+}
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so (1-32) or (0-31) doesn't apply
+ * It could be 0 to 32, based on num of 0's in there
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+ unsigned int res;
+
+ __asm__ __volatile__(
+ " norm.f %0, %1 \n"
+ " mov.n %0, 0 \n"
+ " add.p %0, %0, 1 \n"
+ : "=r"(res)
+ : "r"(x)
+ : "cc");
+
+ return res;
+}
+
+static inline int constant_fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+ if (__builtin_constant_p(x))
+ return constant_fls(x);
+
+ return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+ if (!x)
+ return 0;
+ else
+ return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+ if (!word)
+ return word;
+
+ return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
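
The bit-scan helpers above follow different numbering conventions: fls()/ffs() are 1-based with 0 meaning no bit set, while __fls()/__ffs() are 0-based. A small standalone sketch reusing the constant_fls() ladder to sanity-check those conventions (my_fls/my_ffs are illustrative names, not kernel API):

#include <stdio.h>
#include <assert.h>

static int my_fls(unsigned long x)      /* same ladder as constant_fls() above */
{
        int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
        if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
        if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
        if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
        if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
        return r;
}

/* isolate the lowest set bit, then report its 1-based position */
#define my_ffs(x) ({ unsigned long __t = (x); my_fls(__t & -__t); })

int main(void)
{
        assert(my_fls(0) == 0 && my_fls(1) == 1 && my_fls(0x80000000u) == 32);
        assert(my_ffs(6) == 2 && my_ffs(0x80000000u) == 32);    /* 0b110 -> bit 1 -> ffs = 2 */
        printf("bit-scan conventions hold\n");
        return 0;
}
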
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 000000000..ea022d478
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address);
+void die(const char *str, struct pt_regs *regs, unsigned long address);
+
+#define BUG() do { \
+ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ dump_stack(); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 000000000..7861255da
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* If the cache ($$) line size is not configured, set up a dummy value for the rest of the kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
+
+/*
+ * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF)
+ * Ideal for wiring memory mapped peripherals, as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di), hence drivers are more portable
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " ld.di %0, [%1] \n" \
+ : "=r"(__ret) \
+ : "r"(ptr)); \
+ __ret; \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({ \
+ __asm__ __volatile__( \
+ " st.di %0, [%1] \n" \
+ : \
+ : "r"(data), "r"(ptr)); \
+})
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void read_decode_cache_bcr(void);
+
+#endif /* !__ASSEMBLY__ */
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIL 0x19
+#if defined(CONFIG_ARC_MMU_V3)
+#define ARC_REG_IC_PTAG 0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#if defined(CONFIG_ARC_MMU_V3)
+#define ARC_REG_DC_PTAG 0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_FLUSH_STATUS 0x100
+
+#endif /* __ARC_ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 000000000..6abc4972b
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ * vineetg: April 2008
+ * -Added a critical CacheLine flush to copy_to_user_page( ) which
+ * was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/shmparam.h>
+
+/*
+ * Semantically we need this because icache doesn't snoop dcache/dma.
+ * However ARC Cache flush requires paddr as well as vaddr, latter not available
+ * in the flush_icache_page() API. So we no-op it but do the equivalent work
+ * in update_mmu_cache()
+ */
+#define flush_icache_page(vma, page)
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v) \
+ ___flush_dcache_page((unsigned long)p, (unsigned long)v)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+#define flush_cache_mm(mm) /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+
+#else /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long u_vaddr);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
+ * This works around some PIO based drivers which don't call flush_dcache_page
+ * to record that they dirtied the dcache
+ */
+#define PG_dc_clean PG_arch_1
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+ return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+}
+
+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2) \
+({ \
+ cache_is_vipt_aliasing() ? \
+ (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
+})
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len); \
+
+#endif
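
CACHE_COLOR() above reduces the congruence check to a single address bit: two virtual mappings of the same page index the same cache set only if bit PAGE_SHIFT matches, which is exactly what addr_not_cache_congruent() tests. A user-space sketch of that check, using an assumed PAGE_SHIFT of 13 (8K pages) and made-up addresses:

#include <stdio.h>

#define EX_PAGE_SHIFT           13      /* hypothetical page shift for the demo */
#define EX_CACHE_COLOR(addr)    ((((unsigned long)(addr)) >> EX_PAGE_SHIFT) & 1)

int main(void)
{
        unsigned long a = 0x10002000, b = 0x10006000, c = 0x10004000;

        /* a and b share a color (cache congruent); a and c do not, so a
         * VIPT-aliasing kernel would have to flush before reusing the page */
        printf("color(a)=%lu color(b)=%lu color(c)=%lu\n",
               EX_CACHE_COLOR(a), EX_CACHE_COLOR(b), EX_CACHE_COLOR(c));
        return 0;
}
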
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 000000000..10957298b
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
+ * -Insn Scheduling improvements to csum core routines.
+ * = csum_fold( ) largely derived from ARM version.
+ * = ip_fast_csum( ) to have module scheduling
+ * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ * worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ * Fold a partial checksum
+ *
+ * The two 16-bit half-words comprising the 32-bit sum are added, any carry
+ * into bit 16 is added back, and the final 16-bit result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+ unsigned r = s << 16 | s >> 16; /* ror */
+ s = ~s;
+ s -= r;
+ return s >> 16;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const void *ptr = iph;
+ unsigned int tmp, tmp2, sum;
+
+ __asm__(
+ " ld.ab %0, [%3, 4] \n"
+ " ld.ab %2, [%3, 4] \n"
+ " sub %1, %4, 2 \n"
+ " lsr.f lp_count, %1, 1 \n"
+ " bcc 0f \n"
+ " add.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ "0: lp 1f \n"
+ " ld.ab %1, [%3, 4] \n"
+ " adc.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ " adc.f %0, %0, %1 \n"
+ "1: adc.f %0, %0, %2 \n"
+ " add.cs %0,%0,1 \n"
+ : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+ : "r"(ihl)
+ : "cc", "lp_count", "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__ __volatile__(
+ " add.f %0, %0, %1 \n"
+ " adc.f %0, %0, %2 \n"
+ " adc.f %0, %0, %3 \n"
+ " adc.f %0, %0, %4 \n"
+ " adc %0, %0, 0 \n"
+ : "+&r"(sum)
+ : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ "r"(len),
+#else
+ "r"(len << 8),
+#endif
+ "r"(htons(proto))
+ : "cc");
+
+ return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
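
csum_fold() above uses a rotate-and-subtract identity rather than the textbook add-the-halves, add-back-the-carry, invert sequence; both yield the same 16-bit fold. A host-side sketch (illustrative names, sample values chosen arbitrarily) checking the two forms against each other:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

static uint16_t fold_rotate(uint32_t s)         /* same arithmetic as csum_fold() above */
{
        uint32_t r = (s << 16) | (s >> 16);     /* rotate by 16 */

        s = ~s;
        s -= r;
        return s >> 16;
}

static uint16_t fold_textbook(uint32_t s)
{
        uint32_t t = (s & 0xffff) + (s >> 16);  /* add the two half-words */

        t = (t & 0xffff) + (t >> 16);           /* add back any carry into bit 16 */
        return (uint16_t)~t;
}

int main(void)
{
        uint32_t samples[] = { 0x12345678, 0xffff0001, 0x0000ffff, 0xdeadbeef, 0 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                assert(fold_rotate(samples[i]) == fold_textbook(samples[i]));

        printf("csum_fold variants agree\n");
        return 0;
}
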
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 000000000..bf9d29f5b
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still a better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+ return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 000000000..44fd531f4
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long prev;
+
+ /*
+ * Explicit full memory barrier needed before/after as
+ * LLOCK/SCOND themselves don't provide any such semantics
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " brne %0, %2, 2f \n"
+ " scond %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev) /* Early clobber, to prevent reg reuse */
+ : "r"(ptr), /* Not "m": llock only supports reg direct addr mode */
+ "ir"(expected),
+ "r"(new) /* can't be "ir". scond can't take LIMM for "b" */
+ : "cc", "memory"); /* so that gcc knows memory is being written here */
+
+ smp_mb();
+
+ return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long flags;
+ int prev;
+ volatile unsigned long *p = ptr;
+
+ /*
+ * spin lock/unlock provide the needed smp_mb() before/after
+ */
+ atomic_ops_lock(flags);
+ prev = *p;
+ if (prev == expected)
+ *p = new;
+ atomic_ops_unlock(flags);
+ return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee semantics.
+ * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
+ * which also happens to be atomic_ops_lock.
+ *
+ * Thus despite semantically being different, implementation of atomic_cmpxchg()
+ * is same as cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ smp_mb();
+
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r"(val)
+ : "r"(ptr)
+ : "memory");
+
+ smp_mb();
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+/*
+ * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
+ * not require any locking. However there's a quirk.
+ * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
+ * incidentally it "reuses" the same atomic_ops_lock used by atomic APIs.
+ * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
+ * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ * if (UP or LLSC)
+ * xchg doesn't need serialization
+ * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ * xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with) \
+({ \
+ unsigned long flags; \
+ typeof(*(ptr)) old_val; \
+ \
+ atomic_ops_lock(flags); \
+ old_val = _xchg(ptr, with); \
+ atomic_ops_unlock(flags); \
+ old_val; \
+})
+
+#else
+
+#define xchg(ptr, with) _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following definition
+ * is incorrect. But here's the rationale:
+ * SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ * LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
+ * is natively "SMP safe", no serialization required).
+ * UP : other atomics disable IRQ, so no way a different-context atomic_xchg()
+ * could clobber them. atomic_xchg() itself would be 1 insn, so it
+ * can't be clobbered by others. Thus no serialization required when
+ * atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
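
cmpxchg() above is typically consumed in a retry loop of the shape used by __atomic_add_unless() in atomic.h. A user-space sketch of that loop, with GCC's __sync_val_compare_and_swap() builtin standing in for the kernel's cmpxchg() (add_unless is an illustrative name):

#include <stdio.h>

static int add_unless(int *v, int a, int u)
{
        int c = *v, old;

        /* keep trying until either *v equals u (give up) or our CAS wins */
        while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
                c = old;        /* someone else changed *v; retry with the fresh value */

        return c;               /* old value; caller can tell whether the add happened */
}

int main(void)
{
        int counter = 3;

        add_unless(&counter, 1, 0);             /* counter != 0, so it becomes 4 */
        printf("%d\n", counter);
        return 0;
}
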
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 000000000..c2453ee62
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ * - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 000000000..43de30256
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ * -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ * -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h> /* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(
+ "1: sub.f %0, %0, 1 \n"
+ " jpnz 1b \n"
+ : "+r"(loops)
+ :
+ : "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ * -we have precomputed @loops_per_jiffy
+ * -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ * -Mathematically if we multiply and divide a number by same value the
+ * result remains unchanged: In this case, we use 2^32
+ * -> (loops_per_N_usec * 2^32 ) / 2^32
+ * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ * -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ * -Dividing by 2^32 is simply a right shift by 32
+ * -We only need to ensure that the multiply per the above eqn happens in
+ * 64-bit precision (if the CPU doesn't support it, gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long loops;
+
+ /* (u64) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
+
+ __delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+ : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
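
The constant 4295 in __udelay() above is 2^32 / 10^6 rounded, so multiplying by it and shifting right by 32 approximates the divide-by-1000000 without a runtime division. A quick host-side check with made-up HZ and loops_per_jiffy values (hz, lpj and usecs below are example numbers only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t hz = 100, lpj = 500000, usecs = 250;     /* example values only */

        uint64_t exact  = usecs * hz * lpj / 1000000;           /* the "normal math" */
        uint64_t approx = (usecs * 4295 * hz * lpj) >> 32;      /* the __udelay() form */

        printf("exact=%llu approx=%llu\n",
               (unsigned long long)exact, (unsigned long long)approx);  /* both 12500 */
        return 0;
}
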
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 000000000..f1cce3d05
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+ op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+ op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+ op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+ op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+ op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+ op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+ op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+ noflow,
+ direct_jump,
+ direct_call,
+ indirect_jump,
+ indirect_call,
+ invalid_instr
+};
+
+#define IS_BIT(word, n) ((word) & (1<<n))
+#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
+#define MINOR_OPCODE(word) (BITS((word), 16, 21))
+#define FIELD_A(word) (BITS((word), 0, 5))
+#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
+ (BITS((word), 24, 26)))
+#define FIELD_C(word) (BITS((word), 6, 11))
+#define FIELD_u6(word) FIELD_C(word)
+#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
+ BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
+ BITS(word, 16, 23)), 9)
+#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
+ (BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
+ BITS((word), 8, 10))
+#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word) (BITS((word), 0, 4))
+#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L 0x00000100
+#define REG_LIMM 62
+
+struct disasm_state {
+ /* generic info */
+ unsigned long words[2];
+ int instr_len;
+ int major_opcode;
+ /* info for branch/jump */
+ int is_branch;
+ int target;
+ int delay_slot;
+ enum flow flow;
+ /* info for load/store */
+ int src1, src2, src3, dest, wb_reg;
+ int zz, aa, x, pref, di;
+ int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+ if (IS_BIT(value, (bits - 1)))
+ value |= (0xffffffff << bits);
+
+ return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+ uint16_t word = *((uint16_t *)addr);
+ int opcode = (word >> 11) & 0x1F;
+ return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+ *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
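
sign_extend() above widens an N-bit two's-complement field, as extracted by the FIELD_* macros, to a full signed int. A quick standalone check with made-up 9-bit field values:

#include <stdio.h>

#define IS_BIT(word, n) ((word) & (1 << (n)))

static int sign_extend(int value, int bits)     /* same logic as in disasm.h above */
{
        if (IS_BIT(value, bits - 1))
                value |= (0xffffffff << bits);  /* replicate the sign bit upward */

        return value;
}

int main(void)
{
        /* 0x1ff has bit 8 set -> negative; 0x0ff does not -> stays 255 */
        printf("%d %d\n", sign_extend(0x1ff, 9), sign_extend(0x0ff, 9));        /* -1 255 */
        return 0;
}
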
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000..45b8e0cea
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * The dma_map_* APIs take CPU addresses, which are kernel logical addresses
+ * based in the untranslated address space (0x8000_0000). The DMA address
+ * (bus addr) ideally needs to be 0x0000_0000 based, hence these glue routines.
+ * However, given that intermediate bus bridges can ignore the high bit, we can
+ * do with these routines being no-ops.
+ * If a platform/device comes up which strictly requires a 0 based bus addr
+ * (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to explicitly made
+ * consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir) \
+do { \
+ if (__builtin_constant_p(dir)) \
+ __inline_dma_cache_sync(addr, sz, dir); \
+ else \
+ __arc_dma_cache_sync(addr, sz, dir); \
+} \
+while (0);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+ return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+#endif
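
As a rough illustration of the streaming API above (not part of the header), a driver
would typically map a buffer before handing it to the device and unmap it afterwards;
the device, buffer and length names below are hypothetical:

    /* Minimal usage sketch; "dev", "buf" and "len" are hypothetical. */
    #include <linux/dma-mapping.h>

    static int start_tx(struct device *dev, void *buf, size_t len)
    {
    	dma_addr_t handle;

    	/* write back dirty cache lines so the device sees the data */
    	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, handle))
    		return -ENOMEM;

    	/* ... program the device with "handle" and kick off the DMA ... */

    	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    	return 0;
    }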
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 000000000..ca7c45181
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 000000000..a26282857
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT 93
+
+/* ARC Relocations (kernel Modules only) */
+#define R_ARC_32 0x4
+#define R_ARC_32_ME 0x1B
+#define R_ARC_S25H_PCREL 0x10
+#define R_ARC_S25W_PCREL 0x11
+
+/* to set parameters in the core dumps */
+#define ELF_ARCH EM_ARCOMPACT
+#define ELF_CLASS ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ * -we don't load something for the wrong architecture.
+ * -The userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 000000000..884081099
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer rely on the fact that if we are
+ * NOT in user mode, the stack has already been switched to kernel mode;
+ * e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
+ * its prologue, including the stack switch from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ *  Normally the CPU does this automatically, however when doing a FAKE rtie,
+ *  we also need to do it explicitly. The problem in the macros
+ *  FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *  was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ *   address write-back load ld.ab instead of separate ld/add instructions
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h> /* For NR_syscalls definition */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h> /* For VMALLOC_START */
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+.macro PUSH reg
+ st.a \reg, [sp, -4]
+.endm
+
+.macro PUSHAX aux
+ lr r9, [\aux]
+ PUSH r9
+.endm
+
+.macro POP reg
+ ld.ab \reg, [sp, 4]
+.endm
+
+.macro POPAX aux
+ POP r9
+ sr r9, [\aux]
+.endm
+
+/*--------------------------------------------------------------
+ * Helpers to save/restore Scratch Regs:
+ * used by Interrupt/Exception Prologue/Epilogue
+ *-------------------------------------------------------------*/
+.macro SAVE_R0_TO_R12
+ PUSH r0
+ PUSH r1
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ PUSH r5
+ PUSH r6
+ PUSH r7
+ PUSH r8
+ PUSH r9
+ PUSH r10
+ PUSH r11
+ PUSH r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+ POP r12
+ POP r11
+ POP r10
+ POP r9
+ POP r8
+ POP r7
+ POP r6
+ POP r5
+ POP r4
+ POP r3
+ POP r2
+ POP r1
+ POP r0
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld r25, [sp, 12]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Helpers to save/restore callee-saved regs:
+ * used by several macros below
+ *-------------------------------------------------------------*/
+.macro SAVE_R13_TO_R24
+ PUSH r13
+ PUSH r14
+ PUSH r15
+ PUSH r16
+ PUSH r17
+ PUSH r18
+ PUSH r19
+ PUSH r20
+ PUSH r21
+ PUSH r22
+ PUSH r23
+ PUSH r24
+.endm
+
+.macro RESTORE_R24_TO_R13
+ POP r24
+ POP r23
+ POP r22
+ POP r21
+ POP r20
+ POP r19
+ POP r18
+ POP r17
+ POP r16
+ POP r15
+ POP r14
+ POP r13
+.endm
+
+#define OFF_USER_R25_FROM_R24 (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
+
+/*--------------------------------------------------------------
+ * Collect User Mode callee regs as struct callee_regs - needed by
+ * fork/do_signal/unaligned-access-emulation.
+ * (By default only scratch regs are saved on entry to kernel)
+ *
+ * Special handling for r25 if used for caching Task Pointer.
+ * It would have been saved in task->thread.user_r25 already, but to keep
+ * the interface the same, it is copied into the regular r25 placeholder in
+ * struct callee_regs.
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+
+ SAVE_R13_TO_R24
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Retrieve orig r25 and save it on stack
+ ld.as r12, [sp, OFF_USER_R25_FROM_R24]
+ st.a r12, [sp, -4]
+#else
+ PUSH r25
+#endif
+
+.endm
+
+/*--------------------------------------------------------------
+ * Save kernel mode callee regs at the time of Context Switch.
+ *
+ * Special handling for r25 if used for caching the Task Pointer.
+ * The kernel simply skips saving it since it will be loaded with the
+ * incoming task pointer anyway
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+
+ SAVE_R13_TO_R24
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ sub sp, sp, 4
+#else
+ PUSH r25
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Opposite of SAVE_CALLEE_SAVED_KERNEL
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ add sp, sp, 4 /* skip usual r25 placeholder */
+#else
+ POP r25
+#endif
+ RESTORE_R24_TO_R13
+.endm
+
+/*--------------------------------------------------------------
+ * Opposite of SAVE_CALLEE_SAVED_USER
+ *
+ * ptrace tracer or unaligned-access fixup might have changed a user mode
+ * callee reg which is saved back to usual r25 storage location
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld.ab r12, [sp, 4]
+ st.as r12, [sp, OFF_USER_R25_FROM_R24]
+#else
+ POP r25
+#endif
+ RESTORE_R24_TO_R13
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+ add sp, sp, SZ_CALLEE_REGS
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hosts thread_info;
+ * the stack begins at the end of the page and grows down towards it
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+ /* Get task->thread_info (this is essentially start of a PAGE) */
+ ld \out, [\tsk, TASK_THREAD_INFO]
+
+ /* Go to end of page, where the stack begins (it grows downwards from here) */
+ add2 \out, \out, (THREAD_SIZE)/4
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP is set to kernel mode stack pointer
+ * If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * a L2 IRQ "Interrupts" L1
+ * That way, although the L2 IRQ happened in kernel mode, the stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in
+ * the L1 ISR caused SP to go wacko (some small value which looks like
+ * a USER stack) and then we take the L2 ISR.
+ * The brlo above alone would treat it as a valid L1-L2 scenario
+ * instead of shouting aloud.
+ * The only feasible way is to make sure this L2 happened in the
+ * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+ * safe-keeping not really needed, but it keeps the epilogue code
+ * (SP restore) simpler/uniform.
+ */
+ b.d 66f
+ mov r9, sp
+
+88: /*------ Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+ /* With current tsk in r9, get its kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+66:
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * Treat r25 as scratch reg, save it on stack first
+ * Load it with current task pointer
+ */
+ st r25, [r9, -4]
+ GET_CURR_TASK_ON_CPU r25
+#endif
+
+ /* Save Pre Intr/Exception User SP on kernel stack */
+ st.a sp, [r9, -16] ; Make room for orig_r0, ECR, user_r25
+
+ /* CAUTION:
+ * SP should be set at the very end when we are done with everything
+ * In case of 2 levels of interrupt we depend on value of SP to assume
+ * that everything else is done (loading r25 etc)
+ */
+
+ /* set SP to point to kernel mode stack */
+ mov sp, r9
+
+ /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN reg
+
+ ld \reg, [sp, PT_status32]
+ bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+ bset \reg, \reg, STATUS_L_BIT
+ sr \reg, [erstatus]
+ mov \reg, 55f
+ sr \reg, [eret]
+
+ rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ bic \reg, sp, (THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS reg
+ GET_CURR_THR_INFO_FROM_SP \reg
+ ld \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG reg
+#ifdef CONFIG_SMP
+ sr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ st \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG reg
+#ifdef CONFIG_SMP
+ lr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+ /* Need at least 1 reg to code the early exception prologue */
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ /* U/K mode at time of exception (stack not switched if already K) */
+ lr r9, [erstatus]
+
+ /* ARC700 doesn't provide auto-stack switching */
+ SWITCH_TO_KERNEL_STK
+
+ /* save the regfile */
+ SAVE_ALL_SYS
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP, which is also an exception
+ * from the CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+
+ lr r9, [ecr]
+ st r9, [sp, 8] /* ECR */
+ st r0, [sp, 4] /* orig_r0, needed only for sys calls */
+
+ /* Restore r9 used to code the early prologue */
+ EXCPN_PROLOG_RESTORE_REG r9
+
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSHAX eret
+ PUSHAX erstatus
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX erbta
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used that way, interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+ POPAX erbta
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX erstatus
+ POPAX eret
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+ /* restore original r9 to be saved as part of reg-file */
+#ifdef CONFIG_SMP
+ lr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld r9, [@int1_saved_reg]
+#endif
+
+ /* now we are ready to save the remaining context :) */
+ st event_IRQ1, [sp, 8] /* Dummy ECR */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSH ilink1
+ PUSHAX status32_l1
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX bta_l1
+.endm
+
+.macro SAVE_ALL_INT2
+
+ /* TODO-vineetg: for SMP we can't use a global, nor can we use
+ * SCRATCH0 as we do for int1, because while int1 is using
+ * it, an int2 can come in
+ */
+ /* restore original r9, saved in int2_saved_reg */
+ ld r9, [@int2_saved_reg]
+
+ /* now we are ready to save the remaining context :) */
+ st event_IRQ2, [sp, 8] /* Dummy ECR */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSH ilink2
+ PUSHAX status32_l2
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX bta_l2
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used that way, interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+ POPAX bta_l1
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX status32_l1
+ POP ilink1
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+ POPAX bta_l2
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX status32_l2
+ POP ilink2
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro GET_CURR_TASK_ON_CPU reg
+ GET_CPU_ID \reg
+ ld.as \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ GET_CPU_ID \tmp
+ add2 \tmp, @_current_task, \tmp
+ st \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+
+.endm
+
+
+#else /* Uniprocessor implementation of macros */
+
+.macro GET_CURR_TASK_ON_CPU reg
+ ld \reg, [@_current_task]
+.endm
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ st \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ * -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ GET_CURR_TASK_ON_CPU \reg
+ add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARC_ENTRY_H */
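
For readers more comfortable with C, here is a minimal sketch of what GET_TSK_STACK_BASE
computes: the kernel stack occupies the thread_info page and its usable top sits THREAD_SIZE
above thread_info. This is illustrative only, not code from the port:

    /* C equivalent of GET_TSK_STACK_BASE, for illustration only */
    static inline unsigned long task_kstack_base(struct task_struct *tsk)
    {
    	return (unsigned long)task_thread_info(tsk) + THREAD_SIZE;
    }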
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 000000000..28abc6905
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16b */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 000000000..4dc64ddeb
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+/* Compare-xchg with preemption disabled.
+ * Notes:
+ * -Best-Effort: Exchg happens only if compare succeeds.
+ * If compare fails, returns; leaving retry/looping to upper layers
+ * -successful cmp-xchg: return orig value in @addr (same as cmp val)
+ * -Compare fails: return orig value in @addr
+ * -user access r/w fails: return -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ u32 newval)
+{
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ /* TBD : can use llock/scond */
+ __asm__ __volatile__(
+ "1: ld %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: st %2, [%3] \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: mov %0, %4 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 2b, 4b \n"
+ " .previous\n"
+ : "=&r"(val)
+ : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ *uval = val;
+ return val;
+}
+
+#endif
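
The decoder in futex_atomic_op_inuser() above pulls op/cmp/oparg/cmparg out of a single
word; below is a hedged sketch of the corresponding packing, mirroring the shift positions
the decoder uses (an illustrative helper, not a kernel API):

    /* Pack fields the way the decoder above unpacks them: op in bits 31:28,
     * cmp in 27:24, oparg in 23:12, cmparg in 11:0 (illustrative helper). */
    static inline int futex_pack_op(int op, int cmp, int oparg, int cmparg)
    {
    	return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
    	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
    }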
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 000000000..cabd518cb
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_wc(phy, sz) ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__(
+ " ldb%U1 %0, %1 \n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+
+ return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__(
+ " ldw%U1 %0, %1 \n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+ return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__(
+ " ld%U1 %0, %1 \n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+ return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stb%U1 %0, %1 \n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stw%U1 %0, %1 \n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " st%U1 %0, %1 \n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+}
+
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
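
Typical use of these accessors goes through ioremap() plus the readl()/writel() wrappers
pulled in from asm-generic/io.h; in the sketch below the base address and register offsets
are hypothetical:

    /* Illustrative MMIO access; base address and offsets are made up. */
    #include <linux/io.h>
    #include <linux/sizes.h>

    static u32 read_block_id(void)
    {
    	void __iomem *regs = ioremap(0xf0000000, SZ_4K);
    	u32 id = 0;

    	if (regs) {
    		id = readl(regs);		/* ends up in __raw_readl() */
    		writel(0x1, regs + 0x4);	/* hypothetical enable register */
    		iounmap(regs);
    	}
    	return id;
    }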
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 000000000..f38652fb2
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_CPU_IRQS 32 /* number of interrupt lines of ARC770 CPU */
+#define NR_IRQS 128 /* allow some CPU external IRQ handling */
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ 3
+#define TIMER1_IRQ 4
+
+#include <linux/interrupt.h>
+#include <asm-generic/irq.h>
+
+extern void arc_init_IRQ(void);
+void arc_local_timer_setup(void);
+void arc_request_percpu_irq(int irq, int cpu,
+ irqreturn_t (*isr)(int irq, void *dev),
+ const char *irq_nm, void *percpu_dev);
+
+#endif
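
arc_request_percpu_irq() is how per-cpu interrupt sources (e.g. the local timers) get
wired up; a hedged caller sketch follows, with the handler and name invented for
illustration:

    /* Hypothetical per-cpu timer registration using the helper above */
    static irqreturn_t my_timer_isr(int irq, void *dev_id)
    {
    	/* acknowledge the timer, run the clockevent handler, ... */
    	return IRQ_HANDLED;
    }

    static void my_timer_irq_setup(int cpu, void *percpu_dev)
    {
    	arc_request_percpu_irq(TIMER0_IRQ, cpu, my_timer_isr,
    			       "my-timer", percpu_dev);
    }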
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 000000000..27ecc6975
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#include <asm/arcregs.h>
+
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ *
+ * All of them have "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
+ * Reasoning : https://lkml.org/lkml/2013/4/8/15
+ *
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory", "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags)
+ : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp)
+ :
+ : "memory");
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+ TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE scratch
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+ TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif
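
These arch_* helpers back the generic local_irq_save()/local_irq_restore() pair; the
usual pattern for a small IRQ-safe critical section looks like the sketch below (the
counter is hypothetical):

    /* Usage sketch; "shared_counter" is hypothetical */
    static int shared_counter;

    static void bump_counter(void)
    {
    	unsigned long flags;

    	local_irq_save(flags);		/* -> arch_local_irq_save() */
    	shared_counter++;		/* IRQ-safe read-modify-write */
    	local_irq_restore(flags);	/* -> arch_local_irq_restore() */
    }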
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 000000000..3fbe6c472
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_IERR,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 000000000..fea931634
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,63 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/ptrace.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS 87
+
+#define BREAK_INSTR_SIZE 2
+#define CACHE_FLUSH_IS_SAFE 1
+#define NUMREGBYTES (GDB_MAX_REGS * 4)
+#define BUFMAX 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ("trap_s 0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs);
+
+/* This is the numbering of registers according to the GDB. See GDB's
+ * arc-tdep.h for details.
+ *
+ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
+enum arc_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+ _FP = 27,
+ __SP = 28,
+ _R30 = 30,
+ _BLINK = 31,
+ _LP_COUNT = 60,
+ _STOP_PC = 64,
+ _RET = 64,
+ _LP_START = 65,
+ _LP_END = 66,
+ _STATUS32 = 67,
+ _ECR = 76,
+ _BTA = 82,
+};
+
+#else
+#define kgdb_trap(regs)
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 000000000..944dbedb3
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE 8
+#define MAX_STACK_SIZE 64
+
+struct arch_specific_insn {
+ int is_short;
+ kprobe_opcode_t *t1_addr, *t2_addr;
+ kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p) do { } while (0)
+
+#define kretprobe_blacklist_size 0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+ struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
+#else
+static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 000000000..5faad1711
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+#define ASM_NL ` /* use '`' to mark new line in macro */
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+ .section .data.arcfp
+#else
+ .section .data
+#endif
+ .global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+ .section .text.arcfp, "ax",@progbits
+#else
+ .section .text, "ax",@progbits
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
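
The __arcfp_data/__arcfp_code annotations (and their assembly counterparts) steer hot
objects into DCCM/ICCM when those are enabled; a hedged example with made-up names:

    /* Illustrative only; names are hypothetical */
    static int fast_lut[64] __arcfp_data;		/* DCCM if CONFIG_ARC_HAS_DCCM */

    static int __arcfp_code fast_path(int x)	/* ICCM if CONFIG_ARC_HAS_ICCM */
    {
    	return fast_lut[x & 63];
    }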
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 000000000..e8993a2be
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so that
+ * a multi-platform kernel is built with an array of such descriptors.
+ * We extend the early DT scan to also match the DT's "compatible" string
+ * against the @dt_compat of all such descriptors, and the one with the
+ * highest "DT score" is selected as the global @machine_desc.
+ *
+ * @name: Board/SoC name
+ * @dt_compat: Array of device tree 'compatible' strings
+ * (XXX: although only 1st entry is looked at)
+ * @init_early: Very early callback [called from setup_arch()]
+ * @init_irq: setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp: for each CPU (e.g. setup IPI)
+ * [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time: platform specific clocksource/clockevent registration
+ * [called from time_init()]
+ * @init_machine: arch initcall level callback (e.g. populate static
+ * platform devices or parse Devicetree)
+ * @init_late: Late initcall level callback
+ *
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+
+ void (*init_early)(void);
+ void (*init_irq)(void);
+#ifdef CONFIG_SMP
+ void (*init_smp)(unsigned int);
+#endif
+ void (*init_time)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern const struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used \
+__attribute__((__section__(".arch.info.init"))) = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
+
+extern const struct machine_desc *setup_machine_fdt(void *dt);
+
+#endif
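
A board hooks itself into the descriptor table with MACHINE_START()/MACHINE_END(); a
hedged sketch of what that looks like, with the board name, compatible string and
callback invented for illustration:

    /* Hypothetical board descriptor using the macros above */
    static const char *my_board_compat[] = { "snps,my-board", NULL };

    static void __init my_board_init_machine(void)
    {
    	/* populate platform devices, parse the DT, etc. */
    }

    MACHINE_START(MY_BOARD, "my-board")
    	.dt_compat	= my_board_compat,
    	.init_machine	= my_board_init_machine,
    MACHINE_END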
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 000000000..8c84ae98c
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+/* MMU Management regs */
+#define ARC_REG_MMU_BCR 0x06f
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR 0x80000000
+
+#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001)
+
+/* TLB Commands */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+#endif
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
+} mm_context_t;
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void read_decode_mmu_bcr(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
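
The TLB command interface above is driven by loading the PD0/PD1 aux registers and then
issuing a command; below is a hedged sketch of inserting one entry (write_aux_reg() is
the aux-register accessor from arcregs.h, and the PD0/PD1 encoding details are elided):

    /* Illustrative TLB entry insert; pd0/pd1 encoding details elided */
    static inline void tlb_entry_insert_sketch(unsigned int pd0, unsigned int pd1)
    {
    	write_aux_reg(ARC_REG_TLBPD0, pd0);		/* vaddr + ASID + flags */
    	write_aux_reg(ARC_REG_TLBPD1, pd1);		/* paddr + permissions */
    	write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);	/* pick a slot */
    	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);	/* commit the entry */
    }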
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 000000000..1fd467ef6
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Refactored get_new_mmu_context( ) to only handle live-mm.
+ * retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ * -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/* ARC700 ASID Management
+ *
+ * The ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing
+ * entries with the same vaddr (different tasks) to co-exist. This provides for
+ * "Fast Context Switch", i.e. no TLB flush on context switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cpu.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cpu (and mm->asid) hold the 8-bit MMU PID in the low bits,
+ * while the remaining 24 bits serve as a cycle/generation indicator; natural
+ * 32 bit unsigned math automagically increments the generation when the lower
+ * 8 bits roll over.
+ */
+
+#define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID 0UL
+
+#define asid_mm(mm, cpu) mm->context.asid[cpu]
+#define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
+
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
+
+/*
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Move to new ASID if it was not from current alloc-cycle/generation.
+ * This is done by ensuring that the generation bits in both mm->ASID
+ * and cpu's ASID counter are exactly same.
+ *
+ * Note: Callers needing new ASID unconditionally, independent of
+ * generation, e.g. local_flush_tlb_mm() for forking parent,
+ * first need to destroy the context, setting it to invalid
+ * value.
+ */
+ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
+ goto set_hw;
+
+ /* move to new ASID and handle rollover */
+ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
+
+ local_flush_tlb_all();
+
+ /*
+ * The above checks for rollover of the 8 bit ASID in the 32 bit container.
+ * If the container itself wrapped around, set it to a non zero
+ * "generation" to distinguish from no context
+ */
+ if (!asid_cpu(cpu))
+ asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
+ }
+
+ /* Assign new ASID to tsk */
+ asid_mm(mm, cpu) = asid_cpu(cpu);
+
+set_hw:
+ write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ /* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+ local_irq_save(flags);
+ asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+ local_irq_restore(flags);
+}
+
+/* Prepare the MMU for the task: set up the PID reg with the allocated ASID.
+ * If the task doesn't have an ASID (never allocated, or stolen), get a new one.
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ const int cpu = smp_processor_id();
+
+ /*
+ * Note that the mm_cpumask is "aggregating" only, we don't clear it
+ * for the switched-out task, unlike some other arches.
+ * It is used to enlist cpus for sending TLB flush IPIs; not sending
+ * the IPI to CPUs where a task once ran could cause stale TLB entry
+ * re-use, especially for a multi-threaded task.
+ * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+ * For a non-aggregating mm_cpumask, IPI not sent C1, and if T1
+ * were to re-migrate to C1, it could access the unmapped region
+ * via any existing stale TLB entries.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+#ifndef CONFIG_SMP
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ get_new_mmu_context(next);
+}
+
+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always returns a new ASID, because mm has
+ * an unallocated "initial" value, while in the latter it moves to a new ASID
+ * only if the current one was unallocated
+ */
+#define activate_mm(prev, next) switch_mm(prev, next, NULL)
+
+/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for a retiring mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID valid
+ * again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
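
The ASID validity test in get_new_mmu_context() boils down to comparing the generation
bits of the mm's ASID with those of the per-cpu counter; a one-function illustration of
that arithmetic (not kernel code):

    /* The low 8 bits are the h/w ASID, the upper 24 the generation; a carry
     * out of the ASID bits automatically starts a new generation. */
    static inline int asid_still_valid(unsigned int mm_asid,
    				   unsigned int asid_cache)
    {
    	return !((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK);
    }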
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 000000000..518222bb3
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+ void *unw_info;
+ int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 000000000..a2f88ff9f
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 000000000..9c8aa41e4
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+#define __pte(x) (x)
+#define __pgprot(x) (x)
+#define pte_pgprot(x) (x)
+
+#endif
+
+#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * 128th page, and virt_to_page( ) will return the struct page corresp to it.
+ * mem_map[ ] is an array of struct page for each page frame in the system
+ *
+ * Independent of where linux is linked at, link-addr = physical address
+ * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong in case kernel is not at 0x8zs
+ */
+#define __pa(vaddr) ((unsigned long)vaddr)
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr) \
+ (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Default Permissions for stack/heaps pages (Non Executable) */
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+
+#define WANT_PAGE_VIRTUAL 1
+
+#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
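
Because link address equals physical address here, __pa()/__va() are identity casts and
virt_to_page() is plain index math into mem_map[]; a hedged sketch ("buf" is a
hypothetical kernel logical address):

    /* Illustrative only; "buf" is a hypothetical kernel logical address */
    static void page_math_example(void *buf)
    {
    	struct page *pg = virt_to_page(buf);
    	phys_addr_t pa = page_to_phys(pg);	/* == __pa(buf) rounded to a page */

    	pr_info("%p -> pfn %lu -> phys %pa\n", buf, page_to_pfn(pg), &pa);
    }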
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 000000000..2b8880e95
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,217 @@
+/*
+ * Linux performance counter support for ARC
+ *
+ * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+/* real maximum varies per CPU, this is the maximum supported by the driver */
+#define ARC_PMU_MAX_HWEVENTS 64
+
+#define ARC_REG_CC_BUILD 0xF6
+#define ARC_REG_CC_INDEX 0x240
+#define ARC_REG_CC_NAME0 0x241
+#define ARC_REG_CC_NAME1 0x242
+
+#define ARC_REG_PCT_BUILD 0xF5
+#define ARC_REG_PCT_COUNTL 0x250
+#define ARC_REG_PCT_COUNTH 0x251
+#define ARC_REG_PCT_SNAPL 0x252
+#define ARC_REG_PCT_SNAPH 0x253
+#define ARC_REG_PCT_CONFIG 0x254
+#define ARC_REG_PCT_CONTROL 0x255
+#define ARC_REG_PCT_INDEX 0x256
+
+#define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */
+#define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */
+
+struct arc_reg_pct_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int m:8, c:8, r:6, s:2, v:8;
+#else
+ unsigned int v:8, s:2, r:6, c:8, m:8;
+#endif
+};
+
+struct arc_reg_cc_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int c:16, r:8, v:8;
+#else
+ unsigned int v:8, r:8, c:16;
+#endif
+};
+
+#define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0)
+#define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1)
+#define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2)
+#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
+#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
+#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
+#define PERF_COUNT_ARC_LDC (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_STC (PERF_COUNT_HW_MAX + 7)
+
+#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8)
+
+/*
+ * Some ARC pct quirks:
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ * The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ * and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ * STALLED_CYCLES_FRONTEND.
+ *
+ * We could start multiple performance counters and combine everything
+ * afterwards, but that makes it complicated.
+ *
+ * Note that I$ cache misses aren't counted by either of the two!
+ */
+
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build).
+ * Below is the static map between perf generic/arc specific event_id and
+ * h/w condition names.
+ * At probe time, we loop through each index and find its name to
+ * complete the mapping of perf event_id to h/w index, as the latter is
+ * needed to actually program the counter.
+ */
+static const char * const arc_pmu_ev_hw_map[] = {
+ /* count cycles */
+ [PERF_COUNT_HW_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_BUS_CYCLES] = "crun",
+
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+
+ /* counts condition */
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
+
+ [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
+
+ [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
+ [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
+ [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
+ [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
+ [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xffff
+
+static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
+ },
+		/* DTLB LD/ST Miss not segregated by h/w */
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+#endif /* __ASM_PERF_EVENT_H */
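The comment before arc_pmu_ev_hw_map describes resolving each condition name to a hardware index at probe time. Below is a hedged, standalone sketch of such a name-matching loop; arc_pmu_read_cc_name() and fake_cc_names are stand-ins invented for the example, not the driver's actual API.

#include <stdio.h>
#include <string.h>

/* hypothetical hardware-reported condition names, in index order */
static const char *fake_cc_names[] = { "crun", "iall", "ijmp", "dclm" };

static const char *arc_pmu_read_cc_name(int idx)
{
	return fake_cc_names[idx];	/* stand-in for CC_INDEX/CC_NAMEx reads */
}

/* resolve each generic event's condition name to a hardware index */
static void map_events(const char * const *ev_map, int n_events,
		       int n_conditions, int *ev_hw_idx)
{
	int i, j;

	for (i = 0; i < n_events; i++) {
		ev_hw_idx[i] = -1;			/* not supported */
		if (!ev_map[i])
			continue;
		for (j = 0; j < n_conditions; j++) {
			if (!strcmp(ev_map[i], arc_pmu_read_cc_name(j))) {
				ev_hw_idx[i] = j;	/* this index programs the counter */
				break;
			}
		}
	}
}

int main(void)
{
	const char * const ev_map[] = { "crun", "ijmp", "bpok" };
	int hw_idx[3];

	map_events(ev_map, 3, 4, hw_idx);
	printf("crun->%d ijmp->%d bpok->%d\n", hw_idx[0], hw_idx[1], hw_idx[2]);
	return 0;
}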
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 000000000..81208bfd9
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ * -"/proc/meminfo | grep PageTables" kept on increasing
+ * Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ * -Variable pg-sz means that Page Tables could be variable sized themselves
+ * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ * -Page Table size capped to a max of 1 page to save memory - hence verified.
+ * -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ * -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ * -Switched pgtable_t from being struct page * to unsigned long
+ *  =Needed so that Page Table allocator (pte_alloc_one) is not forced
+ *   to deal with struct page. That way in future we can make it allocate
+ *   multiple PG Tbls in one Page Frame
+ *  =sweet side effect is avoiding calls to ugly page_address() from the
+ *   pg-tbl allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+ pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+ return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ int num, num2;
+ pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+ if (ret) {
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+ memzero(ret, num * sizeof(pgd_t));
+
+ num2 = VMALLOC_SIZE / PGDIR_SIZE;
+ memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+ memzero(ret + num + num2,
+ (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint.
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+ return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ __get_order_pte());
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pgtable_t pte_pg;
+ struct page *page;
+
+ pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ if (!pte_pg)
+ return 0;
+ memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ page = virt_to_page(pte_pg);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return 0;
+ }
+
+ return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+ pgtable_page_dtor(virt_to_page(ptep));
+ free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache() do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
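__get_order_pgd()/__get_order_pte() above derive the allocation order from entry-count * 4 bytes. A standalone sketch of that math, assuming 8 KB pages and the 11:8:13 split (PTRS_PER_PGD = 2048, PTRS_PER_PTE = 256); get_order() here is a minimal userspace stand-in for the kernel helper.

#include <stdio.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int get_order(unsigned long size)	/* minimal stand-in */
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* PGD: 2048 entries * 4 bytes = 8 KB -> one 8 KB page (order 0) */
	printf("pgd order = %d\n", get_order(2048 * 4));
	/* PTE table: 256 entries * 4 bytes = 1 KB -> still one page (order 0) */
	printf("pte order = %d\n", get_order(256 * 4));
	return 0;
}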
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 000000000..9615fe170
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ *     They are semantically the same although used in different contexts:
+ *     VALID marks that a TLB entry exists, which can only happen if PRESENT
+ * - Utilise some unused free bits to confine PTE flags to 12 bits
+ * This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ * -TLB Locking never really existed, except for initial specs
+ * -SILENT_xxx not needed for our port
+ * -Per my request, MMU V3 changes the layout of some of the bits
+ * to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ * -Switched from 8:11:13 split for page table lookup to 11:8:13
+ * -this speeds up page table allocation itself as we now have to memset 1K
+ * instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and rest 7K is unused
+ * need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have a different value in the TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
+ *      separate PD0 and PD1, which combined form a translation entry)
+ *      while from the PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ * (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
+#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */
+
+#else /* MMU v3 onwards */
+
+#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */
+#define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr
+ usable for shared TLB entries (H) */
+#endif
+
+/* vmalloc permissions */
+#define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_GLOBAL | _PAGE_PRESENT)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless config otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+/* Since the kernel itself runs in untranslated space, vmalloc/modules use a
+ * chunk of user vaddr space - visible in all addr spaces, but kernel mode only.
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *    can be tracked independently of X/W unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#define __P000 PAGE_U_NONE
+#define __P001 PAGE_U_R
+#define __P010 PAGE_U_R /* Pvt-W => !W */
+#define __P011 PAGE_U_R /* Pvt-W => !W */
+#define __P100 PAGE_U_X_R /* X => R */
+#define __P101 PAGE_U_X_R
+#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
+#define __P111 PAGE_U_X_R /* Pvt-W => !W */
+
+#define __S000 PAGE_U_NONE
+#define __S001 PAGE_U_R
+#define __S010 PAGE_U_W_R /* W => R */
+#define __S011 PAGE_U_W_R
+#define __S100 PAGE_U_X_R /* X => R */
+#define __S101 PAGE_U_X_R
+#define __S110 PAGE_U_X_W_R /* X => R */
+#define __S111 PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ * 32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | |
+ * | ---> index into Page Table
+ * |
+ * ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE 9
+#endif
+
+#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
+#else
+#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
+#endif
+/*
+ * Number of entries a user land program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirements for lowest virtual address we permit any user space
+ * mapping to be mapped at.
+ */
+#define FIRST_USER_ADDRESS 0UL
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
+ PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
+ pte; \
+})
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+ __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
+#define pte_offset_map(dir, addr) pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to the Page Directory of the "current" task
+ * in an MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
+ * becomes a register read
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of a NON "current" task.
+ * Thus use this macro only when you are certain that "current" is current,
+ * e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr) \
+({ \
+ pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
+ pgd_base + pgd_index(addr); \
+})
+#else
+#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * PAGE_PRESENT is zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off) ((swp_entry_t) { \
+ ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr) (1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#include <asm-generic/pgtable.h>
+
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
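A standalone sketch of the swap-entry packing described above: 5-bit type in bits 4..0, offset from bit 13 up, the in-between bits left clear so PRESENT stays zero. The concrete type/offset values below are arbitrary examples.

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

static swp_entry_t swp_entry(unsigned long type, unsigned long off)
{
	swp_entry_t e = { (type & 0x1f) | (off << 13) };
	return e;
}

static unsigned long swp_type(swp_entry_t e)   { return e.val & 0x1f; }
static unsigned long swp_offset(swp_entry_t e) { return e.val >> 13; }

int main(void)
{
	swp_entry_t e = swp_entry(3, 0x1234);

	assert(swp_type(e) == 3);
	assert(swp_offset(e) == 0x1234);
	assert((e.val & (1 << 9)) == 0);	/* _PAGE_PRESENT (bit 9, MMU v3) stays clear */
	printf("entry = %#lx\n", e.val);
	return 0;
}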
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 000000000..52312cb5d
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ * -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+#endif
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+ unsigned long ksp; /* kernel mode stack pointer */
+ unsigned long callee_reg; /* pointer to callee regs */
+	unsigned long fault_address;	/* doubles as breakpoint holder as well */
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread */
+#define release_thread(thread) do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based off of non-volatile data which
+ * would otherwise get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax() do { } while (0)
+#endif
+
+#define cpu_relax_lowlatency() cpu_relax()
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+/*
+ * Whereabouts of the task's sp, fp, blink when it was last seen in kernel mode.
+ * Look in process.c for details of kernel stack layout
+ */
+#define TSK_K_ESP(tsk) (tsk->thread.ksp)
+
+#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \
+ sizeof(struct callee_regs) + off)))
+
+#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk) TSK_K_BLINK(tsk)
+
+extern void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp);
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernel's Virtual memory area.
+ * Unlike other architectures (MIPS, sh, cris) ARC 700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE (0x10000000) /* 256M */
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END (PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET, i.e. TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER 0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+#endif /* __ASM_ARC_PROCESSOR_H */
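A quick standalone check of the address-space carve-up documented above, assuming the usual PAGE_OFFSET of 0x8000_0000; it reproduces the 0:0x5FFF_FFFF user range, the 256M gutter and the 256M vmalloc window.

#include <stdio.h>

#define PAGE_OFFSET		0x80000000UL	/* assumed link base */
#define VMALLOC_SIZE		0x10000000UL	/* 256M */
#define USER_KERNEL_GUTTER	0x10000000UL	/* 256M */

int main(void)
{
	unsigned long vmalloc_start = PAGE_OFFSET - VMALLOC_SIZE;
	unsigned long task_size = PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER;

	printf("user space : 0x0 - %#lx\n", task_size - 1);	/* 0x5FFF_FFFF */
	printf("gutter     : %#lx - %#lx\n", task_size, vmalloc_start - 1);
	printf("vmalloc    : %#lx - %#lx\n", vmalloc_start, PAGE_OFFSET - 1);
	return 0;
}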
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 000000000..1bfeec2c0
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+
+ /* Real registers */
+ long bta; /* bta_l1, bta_l2, erbta */
+
+ long lp_start, lp_end, lp_count;
+
+ long status32; /* status32_l1, status32_l2, erstatus */
+ long ret; /* ilink1, ilink2 or eret */
+ long blink;
+ long fp;
+ long r26; /* gp */
+
+ long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
+ long sp; /* user/kernel sp depending on where we came from */
+ long orig_r0;
+
+ /*
+	 * To distinguish between exception, syscall and irq
+ * For traps and exceptions, Exception Cause Register.
+ * ECR: <00> <VV> <CC> <PP>
+ * Last word used by Linux for extra state mgmt (syscall-restart)
+ * For interrupts, use artificial ECR values to note current prio-level
+ */
+ union {
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned long state:8, ecr_vec:8,
+ ecr_cause:8, ecr_param:8;
+#else
+ unsigned long ecr_param:8, ecr_cause:8,
+ ecr_vec:8, state:8;
+#endif
+ };
+ unsigned long event;
+ };
+
+ long user_r25;
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+ long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+};
+
+#define instruction_pointer(regs) ((regs)->ret)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({ unsigned int sp; \
+ if (user_mode(regs)) \
+ sp = (regs)->sp;\
+ else \
+ sp = -1; \
+ sp; \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
+#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
+
+#define STATE_SCALL_RESTARTED 0x01
+
+#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
+
+#define current_pt_regs() \
+({ \
+ /* open-coded current_thread_info() */ \
+ register unsigned long sp asm ("sp"); \
+ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
+ (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PTRACE_H */
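A standalone sketch of the current_pt_regs() arithmetic above: mask SP down to the THREAD_SIZE-aligned stack base, then take the pt_regs slot just below the top of that region. The 8 KB THREAD_SIZE, the stub struct and the SP value are assumptions for illustration.

#include <stdio.h>

#define THREAD_SIZE	8192UL			/* assumed: one 8 KB page */

struct pt_regs_stub { long regs[32]; };		/* stand-in for struct pt_regs */

int main(void)
{
	unsigned long sp = 0x8f81e4c0UL;	/* hypothetical kernel-mode SP */
	unsigned long pg_start = sp & ~(THREAD_SIZE - 1);
	struct pt_regs_stub *regs =
		(struct pt_regs_stub *)(pg_start + THREAD_SIZE) - 1;

	printf("stack base %#lx, pt_regs at %p\n", pg_start, (void *)regs);
	return 0;
}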
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 000000000..09db952e1
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char __arc_dccm_base[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 000000000..da2c45979
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b) ((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 000000000..744a6ae15
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early 8250 (now earlycon) requires BASE_BAUD to be defined in this header.
+ * However to still determine it dynamically (for multi-platform images)
+ * we do this in a helper by parsing the FDT early
+ */
+
+extern unsigned int __init arc_early_base_baud(void);
+
+#define BASE_BAUD arc_early_base_baud()
+
+#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 000000000..6e3ef5ba4
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+ int id;
+ const char *str;
+};
+
+struct cpuinfo_data {
+ struct id_to_str info;
+ int up_range;
+};
+
+extern int root_mountflags, end_mem;
+
+void setup_processor(void);
+void __init setup_arch_memory(void);
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 000000000..fffeecc04
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define SHMLBA (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 000000000..3845b9e94
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send: To send IPI to a @cpu
+ * @ipi_clear:		To clear IPI received at @irq
+ */
+struct plat_smp_ops {
+ const char *info;
+ void (*cpu_kick)(int cpu, unsigned long pc);
+ void (*ipi_send)(int cpu);
+ void (*ipi_clear)(int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops plat_smp_ops;
+
+#else /* CONFIG_SMP */
+
+static inline void smp_init_cpus(void) {}
+static inline const char *arc_platform_smp_cpuinfo(void)
+{
+ return "";
+}
+
+#endif /* !CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
+ * based on retry-if-irq-in-atomic (with hardware assist).
+ * However, despite these, we provide the IRQ disabling variant
+ *
+ * (1) These insns were introduced only in the 4.10 release, so support is
+ *     needed for older releases.
+ *
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which the core handles).
+ *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *     disabling for atomicity.
+ *
+ * However exported spinlock API is not usable due to cyclic hdr deps
+ * (even after system.h disintegration upstream)
+ * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ * So the workaround is to use the lowest level arch spinlock API.
+ * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ *	but the same is not true for the ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_atomic_ops_lock); \
+} while (0)
+
+#define atomic_ops_unlock(flags) do { \
+ arch_spin_unlock(&smp_atomic_ops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#define bitops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_bitops_lock); \
+} while (0)
+
+#define bitops_unlock(flags) do { \
+ arch_spin_unlock(&smp_bitops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags) local_irq_save(flags)
+#define atomic_ops_unlock(flags) local_irq_restore(flags)
+
+#define bitops_lock(flags) local_irq_save(flags)
+#define bitops_unlock(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#endif
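A hedged userspace sketch of the pattern the lock/unlock helpers above are meant to enable when LLOCK/SCOND are unavailable: bracket a plain read-modify-write with atomic_ops_lock()/atomic_ops_unlock(). The macro bodies here are no-op stand-ins for the real IRQ-disabling (and, on SMP, spinlock) variants.

#include <stdio.h>

#define atomic_ops_lock(flags)		((void)(flags))	/* local_irq_save() on real HW */
#define atomic_ops_unlock(flags)	((void)(flags))	/* local_irq_restore() */

typedef struct { int counter; } atomic_t;

static void atomic_add(int i, atomic_t *v)
{
	unsigned long flags = 0;

	atomic_ops_lock(flags);		/* no interrupts (and no other CPU on SMP) */
	v->counter += i;		/* plain RMW is now atomic wrt observers */
	atomic_ops_unlock(flags);
}

int main(void)
{
	atomic_t v = { 0 };

	atomic_add(5, &v);
	printf("counter = %d\n", v.counter);
	return 0;
}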
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 000000000..e1651df6a
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ /*
+ * This smp_mb() is technically superfluous, we only need the one
+ * after the lock for providing the ACQUIRE semantics.
+ * However doing the "right" thing was regressing hackbench
+ * so keeping this, pending further investigation
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ " breq %0, %2, 1b \n"
+ : "+&r" (tmp)
+ : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory");
+
+ /*
+ * ACQUIRE barrier to ensure load/store after taking the lock
+ * don't "bleed-up" out of the critical section (leak-in is allowed)
+ * http://www.spinics.net/lists/kernel/msg2010409.html
+ *
+ * ARCv2 only has load-load, store-store and all-all barrier
+ * thus need the full all-all barrier
+ */
+ smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ smp_mb();
+
+ return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ /*
+ * RELEASE barrier: given the instructions avail on ARCv2, full barrier
+ * is the only option
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ /*
+ * superfluous, but keeping for now - see pairing version in
+ * arch_spin_lock above
+ */
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ *
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+/* Would read_trylock() succeed? */
+#define arch_read_can_lock(x) ((x)->counter > 0)
+
+/* Would write_trylock() succeed? */
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ smp_mb();
+ return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
+	 * Hence the claim that Linux rwlocks are unfair to writers
+	 * (they can be starved for an indefinite time by readers).
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
+ }
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 000000000..662627ced
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
+#define __ARCH_SPIN_LOCK_LOCKED__ 1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
+typedef struct {
+ volatile unsigned int counter;
+ arch_spinlock_t lock_mutex;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
+
+#endif
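A standalone sketch of the rwlock counter protocol documented above: readers decrement from the unlocked value 0x0100_0000, a writer takes it to zero. The lock_mutex serialisation is omitted since the sketch is single-threaded.

#include <assert.h>

#define RW_UNLOCKED	0x01000000

static int counter = RW_UNLOCKED;

static int read_trylock(void)  { if (counter > 0) { counter--; return 1; } return 0; }
static void read_unlock(void)  { counter++; }
static int write_trylock(void) { if (counter == RW_UNLOCKED) { counter = 0; return 1; } return 0; }
static void write_unlock(void) { counter = RW_UNLOCKED; }

int main(void)
{
	assert(read_trylock());		/* first reader gets in */
	assert(!write_trylock());	/* writer denied while a reader holds it */
	read_unlock();
	assert(write_trylock());	/* now the writer can take it */
	assert(!read_trylock());	/* readers denied while writer holds it */
	write_unlock();
	return 0;
}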
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 000000000..b29b6064e
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk: NULL for current task, specific task otherwise
+ * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn: Callback invoked for each frame unwound
+ * Returns 0 to continue unwinding, -1 to stop
+ * @arg: Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL
+ * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL
+ * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+ struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *),
+ void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 000000000..95822b550
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -We had half-optimised memset/memcpy, got better versions of those
+ * -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 000000000..1b171ab5f
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last) \
+do { \
+ ARC_FPU_PREV(prev, next); \
+ last = __switch_to(prev, next);\
+ ARC_FPU_NEXT(next); \
+ mb(); \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 000000000..29de09804
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H 1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h> /* in_syscall() */
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (user_mode(regs) && in_syscall(regs))
+ return regs->r8;
+ else
+ return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->r0 = regs->orig_r0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ unsigned long *inside_ptregs = &(regs->r0);
+ inside_ptregs -= i;
+
+ BUG_ON((i + n) > 6);
+
+ while (n--) {
+ args[i++] = (*inside_ptregs);
+ inside_ptregs--;
+ }
+}
+
+#endif
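A standalone sketch of the pointer walk inside syscall_get_arguments() above: since pt_regs stores r12..r0 in descending register order, decrementing from &regs->r0 yields r1, r2, and so on. The mock struct and values are invented for the example.

#include <stdio.h>

struct mock_pt_regs {			/* only the tail of pt_regs that matters here */
	long r5, r4, r3, r2, r1, r0;
};

int main(void)
{
	struct mock_pt_regs regs = { .r0 = 100, .r1 = 101, .r2 = 102,
				     .r3 = 103, .r4 = 104, .r5 = 105 };
	unsigned long args[6];
	long *inside_ptregs = &regs.r0;
	int i = 0, n = 6;

	while (n--) {
		args[i++] = *inside_ptregs;	/* r0, then r1, r2, ... */
		inside_ptregs--;
	}

	for (i = 0; i < 6; i++)
		printf("arg%d = %lu\n", i, args[i]);
	return 0;
}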
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 000000000..e56f9fcc5
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H 1
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 000000000..aca0d5a45
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ *  No need for an ARC specific thread_info allocator (kmalloc/free). This is
+ *  in any case a one-page allocation, thus slab alloc can be short-circuited and
+ * the generic version (get_free_page) would be loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ struct task_struct *task; /* main task structure */
+ mm_segment_t addr_limit; /* thread address space */
+ __u32 cpu; /* current CPU */
+ unsigned long thr_ptr; /* TLS ptr */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+
+/* task is terminating due to the OOM killer */
+#define TIF_MEMDIE 16
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 000000000..0a82960a7
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 000000000..8a1ec9601
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#include <asm/mmu.h>
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+; and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_sel not cleared on startup
+; didn't want to change setup.c
+; hence extra instruction to clean
+;
+; -- should be in cache since in same line
+; as r0/r1 saves above
+;
+ld r0,[jh_ex_way_sel] ; victim pointer
+and r0,r0,1 ; clean
+xor.f r0,r0,1 ; flip
+st r0,[jh_ex_way_sel] ; store back
+asr r0,r1,12 ; get set # <<1, note bit 12=R=0
+or.nz r0,r0,1 ; set way bit
+and r0,r0,0xff ; clean
+sr r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+; Slower in thrash case (where it matters) because more code is executed
+; Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr r0,[eret] /* instruction address */
+xor r0,r0,r1 /* compare set # */
+and.f r0,r0,0x000fe000 /* 2-way MMU mask */
+bne 88f /* not in same set - no need to probe */
+
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and r1,r1,0xff /* Data ASID */
+or r0,r0,r1 /* Instruction address + Data ASID */
+
+lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+xor r0,r0,1 /* flip bottom bit of data index */
+b.d 89f
+sr r0,[ARC_REG_TLBINDEX] /* and put it back */
+88:
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+bmsk r1,r3,7 /* Data ASID, bits 7-0 */
+or_s r0,r0,r1 /* Instruction address + Data ASID */
+
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
+cmp r0,r1 /* if no match on indices, go around */
+xor.eq r1,r1,1 /* flip bottom bit of data index */
+sr r1,[ARC_REG_TLBINDEX] /* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 000000000..a9db5f62a
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#define tlb_flush(tlb) \
+do { \
+ if (tlb->fullmm) \
+ flush_tlb_mm((tlb)->mm); \
+} while (0)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part - implemented via tlb_start_vma() for VIPT aliasing D$
+ * 2) tlb-flush part - implemented via tlb_end_vma(), which flushes the TLB range
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+#define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma) \
+do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+#endif
+
+#define tlb_end_vma(tlb, vma) \
+do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 000000000..71c7b2e4b
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+#ifndef CONFIG_SMP
+#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 000000000..30c9baffa
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ * -__clear_user(), called multiple times during ELF load, was converted from
+ * a byte loop to do as many word-wide clears as possible.
+ *
+ * vineetg: Dec 2009
+ * -Hand crafted constant propagation for "constant" copy sizes
+ * -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ * -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h> /* for generic string functions */
+
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ *     (start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * because it checks that the buffer end is within the limit and that @len
+ * itself does not exceed TASK_SIZE, which together imply that the buffer
+ * start is within the limit too.
+ *
+ * The reason for the rewrite is that, in the majority of cases, @len is a
+ * compile-time constant, so the first sub-expression is subsumed at compile
+ * time.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check against TASK_SIZE using get_fs(), since the addr_limit load
+ * from memory has already been done at this call site for __kernel_ok().
+ *
+ */
+#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
+ ((addr) <= (get_fs() - (sz))))
+#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
+ likely(__user_ok((addr), (sz))))
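+
+/*
+ * Illustration: with a compile-time constant @sz such as 0x10, the first
+ * clause of __user_ok() is folded away by the compiler and the generated
+ * code only checks addr <= get_fs() - 0x10, avoiding a large literal such
+ * as (0x6000_0000 - 0x10).
+ */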
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
+ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
+ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
+ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
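+
+/*
+ * Note: @sz is the access size passed down by the generic get_user() wrapper
+ * (a compile-time constant), so the switch above collapses to a single case.
+ */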
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret) \
+ __asm__ __volatile__( \
+ "1: ld %1,[%2]\n" \
+ "4: ld %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
+ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
+ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
+ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+#define __arc_put_user_one(src, dst, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret) \
+ __asm__ __volatile__( \
+ "1: st %1,[%2]\n" \
+ "4: st %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__ (
+ " mov.f lp_count, %0 \n"
+ " lpnz 2f \n"
+ "1: ldb.ab %1, [%3, 1] \n"
+ " stb.ab %1, [%2, 1] \n"
+ " sub %0,%0,1 \n"
+ "2: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 2b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 3b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /*
+ * Note the '&' earlyclobber on the operand, to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ /*
+ * Hand-crafted constant propagation to reduce code sz of the
+ * laddered copy 16x,8,4,2,1
+ */
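+ /*
+ * e.g. a constant @n of 22 compiles down to the 16-byte block, one
+ * word copy and one halfword copy; the untaken rungs (and their
+ * byte-count bookkeeping) drop out at compile time.
+ */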
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ "1: ld.ab %3, [%2, 4] \n"
+ "11: ld.ab %4, [%2, 4] \n"
+ "12: ld.ab %5, [%2, 4] \n"
+ "13: ld.ab %6, [%2, 4] \n"
+ " st.ab %3, [%1, 4] \n"
+ " st.ab %4, [%1, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ "14: ld.ab %3, [%2,4] \n"
+ "15: ld.ab %4, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " st.ab %4, [%1,4] \n"
+ " sub %0,%0,8 \n"
+ "31: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ "16: ld.ab %3, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " sub %0,%0,4 \n"
+ "32: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ "17: ldw.ab %3, [%2,2] \n"
+ " stw.ab %3, [%1,2] \n"
+ " sub %0,%0,2 \n"
+ "33: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ "18: ldb.ab %3, [%2,2] \n"
+ " stb.ab %3, [%1,2] \n"
+ " sub %0,%0,1 \n"
+ "34: ; nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ "1: ld.ab %5, [%2, 4] \n"
+ "11: ld.ab %6, [%2, 4] \n"
+ "12: ld.ab %7, [%2, 4] \n"
+ "13: ld.ab %8, [%2, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " st.ab %7, [%1, 4] \n"
+ " st.ab %8, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ "14: ld.ab %5, [%2,4] \n"
+ "15: ld.ab %6, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " st.ab %6, [%1,4] \n"
+ " sub.f %0,%0,8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ "16: ld.ab %5, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " sub.f %0,%0,4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ "17: ldw.ab %5, [%2,2] \n"
+ " stw.ab %5, [%1,2] \n"
+ " sub.f %0,%0,2 \n"
+ "33: bbit0 %3,0,34f \n"
+ "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ " stb.ab %5, [%1,1] \n"
+ " sub.f %0,%0,1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__(
+ " mov.f lp_count, %0 \n"
+ " lpnz 3f \n"
+ " ldb.ab %1, [%3, 1] \n"
+ "1: stb.ab %1, [%2, 1] \n"
+ " sub %0, %0, 1 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /* Note the '&' earlyclobber on the operand, to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ " ld.ab %3, [%2, 4] \n"
+ " ld.ab %4, [%2, 4] \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ "1: st.ab %3, [%1, 4] \n"
+ "11: st.ab %4, [%1, 4] \n"
+ "12: st.ab %5, [%1, 4] \n"
+ "13: st.ab %6, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ " ld.ab %4, [%2,4] \n"
+ "14: st.ab %3, [%1,4] \n"
+ "15: st.ab %4, [%1,4] \n"
+ " sub %0, %0, 8 \n"
+ "31:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ "16: st.ab %3, [%1,4] \n"
+ " sub %0, %0, 4 \n"
+ "32:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ " ldw.ab %3, [%2,2] \n"
+ "17: stw.ab %3, [%1,2] \n"
+ " sub %0, %0, 2 \n"
+ "33:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ " ldb.ab %3, [%2,1] \n"
+ "18: stb.ab %3, [%1,1] \n"
+ " sub %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ " ld.ab %7, [%2, 4] \n"
+ " ld.ab %8, [%2, 4] \n"
+ "1: st.ab %5, [%1, 4] \n"
+ "11: st.ab %6, [%1, 4] \n"
+ "12: st.ab %7, [%1, 4] \n"
+ "13: st.ab %8, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ " ld.ab %6, [%2,4] \n"
+ "14: st.ab %5, [%1,4] \n"
+ "15: st.ab %6, [%1,4] \n"
+ " sub.f %0, %0, 8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ "16: st.ab %5, [%1,4] \n"
+ " sub.f %0, %0, 4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ " ldw.ab %5, [%2,2] \n"
+ "17: stw.ab %5, [%1,2] \n"
+ " sub.f %0, %0, 2 \n"
+ "33: bbit0 %3,0,34f \n"
+ " ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ "18: stb.ab %5, [%1,1] \n"
+ " sub.f %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+ long res = n;
+ unsigned char *d_char = to;
+
+ __asm__ __volatile__(
+ " bbit0 %0, 0, 1f \n"
+ "75: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "1: bbit0 %0, 1, 2f \n"
+ "76: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "2: asr.f lp_count, %1, 2 \n"
+ " lpnz 3f \n"
+ "77: st.ab %2, [%0,4] \n"
+ " sub %1, %1, 4 \n"
+ "3: bbit0 %1, 1, 4f \n"
+ "78: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "4: bbit0 %1, 0, 5f \n"
+ "79: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "5: \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 5b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 75b, 3b \n"
+ " .word 76b, 3b \n"
+ " .word 77b, 3b \n"
+ " .word 78b, 3b \n"
+ " .word 79b, 3b \n"
+ " .previous \n"
+ : "+r"(d_char), "+r"(res)
+ : "i"(0)
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res = count;
+ char val;
+ unsigned int hw_count;
+
+ if (count == 0)
+ return 0;
+
+ __asm__ __volatile__(
+ " lp 2f \n"
+ "1: ldb.ab %3, [%2, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " stb.ab %3, [%1, 1] \n"
+ "2: sub %0, %6, %4 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+ : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+ : "memory");
+
+ return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+ long res, tmp1, cnt;
+ char val;
+
+ __asm__ __volatile__(
+ " mov %2, %1 \n"
+ "1: ldb.ab %3, [%0, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " sub.f %2, %2, 1 \n"
+ " bnz 1b \n"
+ " sub %2, %2, 1 \n"
+ "2: sub %0, %1, %2 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, 0 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+ : "0"(s), "1"(n)
+ : "memory");
+
+ return res;
+}
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
+#define __clear_user(d, n) __arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n);
+extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n) arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
+
+#endif
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 000000000..6da6b4eda
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_EMUL_UNALIGNED
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* Not fixed */
+ return 1;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 000000000..7ca628b6e
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27; /* fp */
+ unsigned long r28; /* sp */
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31; /* blink */
+ unsigned long r63; /* pc */
+};
+
+struct unwind_frame_info {
+ struct arc700_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) ((frame)->regs.r63)
+#define UNW_SP(frame) ((frame)->regs.r28)
+#define UNW_BLINK(frame) ((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(r0), \
+ PTREGS_INFO(r1), \
+ PTREGS_INFO(r2), \
+ PTREGS_INFO(r3), \
+ PTREGS_INFO(r4), \
+ PTREGS_INFO(r5), \
+ PTREGS_INFO(r6), \
+ PTREGS_INFO(r7), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(r16), \
+ PTREGS_INFO(r17), \
+ PTREGS_INFO(r18), \
+ PTREGS_INFO(r19), \
+ PTREGS_INFO(r20), \
+ PTREGS_INFO(r21), \
+ PTREGS_INFO(r22), \
+ PTREGS_INFO(r23), \
+ PTREGS_INFO(r24), \
+ PTREGS_INFO(r25), \
+ PTREGS_INFO(r26), \
+ PTREGS_INFO(r27), \
+ PTREGS_INFO(r28), \
+ PTREGS_INFO(r29), \
+ PTREGS_INFO(r30), \
+ PTREGS_INFO(r31), \
+ PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void arc_unwind_setup(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+ int (*callback) (struct unwind_frame_info *info,
+ void *arg),
+ void *arg)
+{
+ return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+ return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ struct pt_regs *regs)
+{
+ return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+static inline void arc_unwind_setup(void)
+{
+}
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..f50d02df7
--- /dev/null
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -0,0 +1,5 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+header-y += elf.h
+header-y += page.h
+header-y += cachectl.h
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..9da71d415
--- /dev/null
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000..51c73f025
--- /dev/null
+++ b/arch/arc/include/uapi/asm/cachectl.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHECTL_H
+#define __ARC_ASM_CACHECTL_H
+
+/*
+ * ARC ABI flags defined for Android's fine-grained cacheflush requirements
+ */
+#define CF_I_INV 0x0002
+#define CF_D_FLUSH 0x0010
+#define CF_D_FLUSH_INV 0x0020
+
+#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
+
+/*
+ * Standard flags expected by cacheflush system call users
+ */
+#define ICACHE CF_I_INV
+#define DCACHE CF_D_FLUSH
+#define BCACHE (CF_I_INV | CF_D_FLUSH)
+
+#endif
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644
index 000000000..0f99ac8fc
--- /dev/null
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_ELF_H
+#define _UAPI__ASM_ARC_ELF_H
+
+#include <asm/ptrace.h> /* for user_regs_struct */
+
+/* Machine specific ELF Hdr flags */
+#define EF_ARC_OSABI_MSK 0x00000f00
+#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
+#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpregset_t;
+
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#endif
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644
index 000000000..e5d41e082
--- /dev/null
+++ b/arch/arc/include/uapi/asm/page.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_PAGE_H
+#define _UAPI__ASM_ARC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#else
+/*
+ * Default 8k
+ * Done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad hoc
+ * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined without
+ * including the correct uClibc header, and our autoconf.h is not available
+ * in their build.
+ */
+#define PAGE_SHIFT 13
+#endif
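+/* i.e. PAGE_SHIFT of 12, 13 or 14 gives 4K, 8K or 16K pages respectively */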
+
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_OFFSET (0x80000000)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
+#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
+#endif
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+
+#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..76a7739aa
--- /dev/null
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _UAPI__ASM_ARC_PTRACE_H
+#define _UAPI__ASM_ARC_PTRACE_H
+
+#define PTRACE_GET_THREAD_AREA 25
+
+#ifndef __ASSEMBLY__
+/*
+ * Userspace ABI: Register state needed by
+ * -ptrace (gdbserver)
+ * -sigcontext (SA_SIGINFO signal frame)
+ *
+ * This is to decouple pt_regs from the user-space ABI, so that pt_regs can be
+ * changed without affecting the ABI.
+ *
+ * The intermediate pad, pad2 are relics of the initial layout, which mirrored
+ * pt_regs to optimize copying pt_regs to/from user_regs_struct.
+ * We no longer need them, but they can't be removed as they are part of the
+ * ABI now.
+ *
+ * Also, sigcontext only cares about the scratch regs, as that is what we
+ * really save/restore for signal handling. However gdb also uses the same
+ * struct, hence the callee regs need to be in there too.
+*/
+struct user_regs_struct {
+
+ long pad;
+ struct {
+ long bta, lp_start, lp_end, lp_count;
+ long status32, ret, blink, fp, gp;
+ long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ long sp;
+ } scratch;
+ long pad2;
+ struct {
+ long r25, r24, r23, r22, r21, r20;
+ long r19, r18, r17, r16, r15, r14, r13;
+ } callee;
+ long efa; /* break pt addr, for break points in delay slots */
+ long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
+};
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644
index 000000000..a6d4e4493
--- /dev/null
+++ b/arch/arc/include/uapi/asm/setup.h
@@ -0,0 +1,6 @@
+/*
+ * setup.h is part of the userspace header ABI, so the UAPI scripts have to
+ * generate it even if there's nothing to export - resulting in an empty
+ * <uapi/asm/setup.h>. However, to prevent "patch" from discarding it, we add
+ * this placeholder comment.
+ */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..9678a11fc
--- /dev/null
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SIGCONTEXT_H
+#define _ASM_ARC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.
+ */
+struct sigcontext {
+ struct user_regs_struct regs;
+};
+
+#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644
index 000000000..fad62f7f4
--- /dev/null
+++ b/arch/arc/include/uapi/asm/signal.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_SIGNAL_H
+#define _ASM_ARC_SIGNAL_H
+
+/*
+ * This is much needed for the ARC sigreturn optimization.
+ * It allows uClibc to piggyback the address of a sigreturn stub in sigaction,
+ * which allows sigreturn-based re-entry into the kernel after handling a
+ * signal. Without this, the kernel needs to "synthesize" the sigreturn
+ * trampoline on the user mode stack, which in turn forces the following:
+ *  -TLB flush (after making the stack page executable)
+ *  -Cache line flush (to make I/D cache lines coherent)
+ */
+#define SA_RESTORER 0x04000000
+
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_ARC_SIGNAL_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644
index 000000000..095599a73
--- /dev/null
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Support single cycle endian-swap insn in ARC700 4.10
+ *
+ * vineetg: June 2009
+ * -Better htonl implementation (5 instead of 9 ALU instructions)
+ * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
+ */
+
+#ifndef __ASM_ARC_SWAB_H
+#define __ASM_ARC_SWAB_H
+
+#include <linux/types.h>
+
+/* Native single cycle endian swap insn */
+#ifdef CONFIG_ARC_HAS_SWAPE
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " swape %0, %1 \n" \
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#else
+
+/* Several ways of Endian-Swap Emulation for ARC
+ * 0: kernel generic
+ * 1: ARC optimised "C"
+ * 2: ARC Custom instruction
+ */
+#define ARC_BSWAP_TYPE 1
+
+#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
+
+/* The kernel default implementation of htonl is
+ * return x<<24 | x>>24 |
+ * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
+ *
+ * This generates 9 instructions on ARC (excluding the ld/st)
+ *
+ * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
+ * 8051fd98: asl r5,r3,24 ; get 3rd Byte
+ * 8051fd9c: lsr r2,r3,24 ; get 0th Byte
+ * 8051fda0: and r4,r3,0xff00
+ * 8051fda8: asl r4,r4,8 ; get 1st Byte
+ * 8051fdac: and r3,r3,0x00ff0000
+ * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
+ * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
+ * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
+ * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
+ * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
+ *
+ * Joern suggested a better "C" algorithm which is great since
+ * (1) It is portable to any architecture
+ * (2) At the same time it takes advantage of the ARC ISA (rotate instructions)
+ */
+
+#define __arch_swab32(x) \
+({ unsigned long __in = (x), __tmp; \
+ __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
+ __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
+ __tmp ^= __in; \
+ __tmp &= 0xff00ff; \
+ __tmp ^ __in; \
+})
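+
+/*
+ * Why this works: after the two rotates, __in already holds bytes 3 and 1 of
+ * the swapped result; the masked XOR difference covers only byte lanes 2 and
+ * 0, so XOR-ing it back into __in swaps exactly the two remaining bytes.
+ */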
+
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
+ " bswap %0, %1 \n"\
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#endif /* ARC_BSWAP_TYPE=zzz */
+
+#endif /* CONFIG_ARC_HAS_SWAPE */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..39e58d1cd
--- /dev/null
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
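+
+/*
+ * Roughly, the arch syscall table is built by defining __SYSCALL to emit a
+ * table entry and then including this header a second time inside the table
+ * initializer (sketch, simplified):
+ *
+ *	#define __SYSCALL(nr, call)	[nr] = (call),
+ *	void *sys_call_table[NR_syscalls] = {
+ *		[0 ... NR_syscalls - 1] = sys_ni_syscall,
+ *	#include <asm/unistd.h>
+ *	};
+ */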
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif