Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/atomic.h          | 128
-rw-r--r--  arch/arc/include/asm/barrier.h         |  12
-rw-r--r--  arch/arc/include/asm/bitops.h          |  60
-rw-r--r--  arch/arc/include/asm/clk.h             |  22
-rw-r--r--  arch/arc/include/asm/cmpxchg.h         |  76
-rw-r--r--  arch/arc/include/asm/entry-compact.h   |  10
-rw-r--r--  arch/arc/include/asm/hugepage.h        |   2
-rw-r--r--  arch/arc/include/asm/irq.h             |  13
-rw-r--r--  arch/arc/include/asm/mmu_context.h     |   2
-rw-r--r--  arch/arc/include/asm/page.h            |   4
-rw-r--r--  arch/arc/include/asm/pgalloc.h         |   4
-rw-r--r--  arch/arc/include/asm/pgtable.h         |   4
-rw-r--r--  arch/arc/include/asm/processor.h       |  53
-rw-r--r--  arch/arc/include/asm/setup.h           |   4
-rw-r--r--  arch/arc/include/asm/smp.h             |   2
-rw-r--r--  arch/arc/include/asm/spinlock.h        | 306
-rw-r--r--  arch/arc/include/asm/thread_info.h     |   2
-rw-r--r--  arch/arc/include/asm/uaccess.h         |   2
-rw-r--r--  arch/arc/include/uapi/asm/byteorder.h  |   2
-rw-r--r--  arch/arc/include/uapi/asm/swab.h       |   2
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h     |   1
21 files changed, 302 insertions(+), 409 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 7730d302c..dd683995b 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,56 +17,25 @@
#include <asm/barrier.h>
#include <asm/smp.h>
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
#define atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay = 1, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " bz 4f \n" \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- "4: ; --- success --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
-
-#else /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM \
- " bnz 1b \n" \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
@@ -75,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -88,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
@@ -180,13 +145,88 @@ ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline int atomic_read(const atomic_t *v)
+{
+ int temp;
+
+ __asm__ __volatile__(
+ " ld.di %0, [%1]"
+ : "=r"(temp)
+ : "r"(&v->counter)
+ : "memory");
+ return temp;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__(
+ " st.di %0,[%1]"
+ :
+ : "r"(i), "r"(&v->counter)
+ : "memory");
+}
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ : \
+ : "r"(i), "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned int temp = i; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(temp) \
+ : "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ temp c_op i; \
+ \
+ return temp; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
+#define atomic_sub(i, v) atomic_add(-(i), (v))
+#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
+
+ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
+ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
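The EZNPS variant above replaces LLOCK/SCOND with a custom CTOP atomic instruction: operands are staged in r2/r3, the `.word` emits the instruction, and the pre-op value comes back in r2 -- which is why ATOMIC_OP_RETURN applies `c_op` once more in C to derive the return value. A minimal C model of that flow, using a GCC builtin as a stand-in (a sketch; the return-old-value semantics are inferred from the "temp c_op i" fix-up, not spelled out in this diff):

static inline int eznps_add_return_model(int i, int *counter)
{
	/* stands in for CTOP_INST_AADD_DI_R2_R2_R3: atomic add, old value back */
	int old = __sync_fetch_and_add(counter, i);

	/* the "temp c_op i" step: recompute the post-op value for the caller */
	return old + i;
}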
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
index a7209983e..b1e327495 100644
--- a/arch/arc/include/asm/barrier.h
+++ b/arch/arc/include/asm/barrier.h
@@ -30,9 +30,7 @@
#define rmb() asm volatile("dmb 1\n" : : : "memory")
#define wmb() asm volatile("dmb 2\n" : : : "memory")
-#endif
-
-#ifdef CONFIG_ISA_ARCOMPACT
+#elif !defined(CONFIG_ARC_PLAT_EZNPS) /* CONFIG_ISA_ARCOMPACT */
/*
* ARCompact based cores (ARC700) only have SYNC instruction which is super
@@ -41,6 +39,14 @@
*/
#define mb() asm volatile("sync\n" : : : "memory")
+
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+#include <plat/ctop.h>
+
+#define mb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+#define rmb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory")
+
#endif
#include <asm-generic/barrier.h>
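All three barrier flavours (DMB on ARCv2, SYNC on ARCompact, and the CTOP scheduler hints on EZNPS) exist to satisfy the same generic mb()/rmb()/wmb() contract. A loose, portable analogue of that contract in C11 -- a sketch for orientation, not the kernel definitions:

#include <stdatomic.h>

#define mb()	atomic_thread_fence(memory_order_seq_cst)	/* full barrier */
#define rmb()	atomic_thread_fence(memory_order_acquire)	/* order loads */
#define wmb()	atomic_thread_fence(memory_order_release)	/* order stores */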
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 0352fb8d2..8da87feec 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -22,7 +22,7 @@
#include <asm/smp.h>
#endif
-#if defined(CONFIG_ARC_HAS_LLSC)
+#ifdef CONFIG_ARC_HAS_LLSC
/*
* Hardware assisted Atomic-R-M-W
@@ -88,7 +88,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
return (old & (1 << nr)) != 0; \
}
-#else /* !CONFIG_ARC_HAS_LLSC */
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
/*
* Non hardware assisted Atomic-R-M-W
@@ -139,7 +139,55 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
return (old & (1UL << (nr & 0x1f))) != 0; \
}
-#endif /* CONFIG_ARC_HAS_LLSC */
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+#define BIT_OP(op, c_op, asm_op) \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ m += nr >> 5; \
+ \
+ nr = (1UL << (nr & 0x1f)); \
+ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \
+ nr = ~nr; \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ : \
+ : "r"(nr), "r"(m), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+}
+
+#define TEST_N_BIT_OP(op, c_op, asm_op) \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long old; \
+ \
+ m += nr >> 5; \
+ \
+ nr = old = (1UL << (nr & 0x1f)); \
+ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \
+ old = ~old; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(old) \
+ : "r"(m), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ return (old & nr) != 0; \
+}
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
/***************************************
* Non atomic variants
@@ -181,9 +229,15 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
__TEST_N_BIT_OP(op, c_op, asm_op)
+#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
+#else
+BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
+BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
+BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
+#endif
/*
* This routine doesn't need to be atomic.
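A notable trick in the EZNPS BIT_OP/TEST_N_BIT_OP above: the platform has atomic AND/OR/XOR but no dedicated bit-clear, so clear_bit() is expressed as an atomic AND with the inverted mask (the compile-time check against CTOP_INST_AAND_DI_R2_R2_R3 flips `nr`). The same idea in plain C, with a GCC builtin standing in for the CTOP instruction -- a sketch, assuming 32-bit longs as on ARC:

static inline void eznps_clear_bit_model(unsigned long nr, volatile unsigned long *m)
{
	unsigned long mask;

	m += nr >> 5;			/* select the 32-bit word */
	mask = 1UL << (nr & 0x1f);	/* bit position within it */

	/* atomic AND with ~mask clears the bit */
	__sync_fetch_and_and(m, ~mask);
}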
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
deleted file mode 100644
index bf9d29f5b..000000000
--- a/arch/arc/include/asm/clk.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_CLK_H
-#define _ASM_ARC_CLK_H
-
-/* Although we can't really hide core_freq, the accessor is still better way */
-extern unsigned long core_freq;
-
-static inline unsigned long arc_get_core_freq(void)
-{
- return core_freq;
-}
-
-extern int arc_set_core_freq(unsigned long);
-
-#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index a444be67c..d819de1c5 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
return prev;
}
-#else
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
@@ -64,23 +64,48 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
return prev;
}
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ /*
+ * Explicit full memory barrier needed before/after
+ */
+ smp_mb();
+
+ write_aux_reg(CTOP_AUX_GPA1, expected);
+
+ __asm__ __volatile__(
+ " mov r2, %0\n"
+ " mov r3, %1\n"
+ " .word %2\n"
+ " mov %0, r2"
+ : "+r"(new)
+ : "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
+ : "r2", "r3", "memory");
+
+ smp_mb();
+
+ return new;
+}
+
#endif /* CONFIG_ARC_HAS_LLSC */
#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n)))
/*
- * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
- * just to gaurantee semantics.
- * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
- * which also happens to be atomic_ops_lock.
- *
- * Thus despite semantically being different, implementation of atomic_cmpxchg()
- * is same as cmpxchg().
+ * atomic_cmpxchg is same as cmpxchg
+ * LLSC: only different in data-type, semantics are exactly same
+ * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
+ * semantics, and this lock also happens to be used by atomic_*()
*/
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
/*
* xchg (reg with memory) based on "Native atomic" EX insn
*/
@@ -143,6 +168,41 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#endif
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ /*
+ * Explicit full memory barrier needed before/after
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ " mov r2, %0\n"
+ " mov r3, %1\n"
+ " .word %2\n"
+ " mov %0, r2\n"
+ : "+r"(val)
+ : "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
+ : "r2", "r3", "memory");
+
+ smp_mb();
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
/*
* "atomic" variant of xchg()
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
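Regardless of the backing implementation (LLOCK/SCOND, spinlock, or the EZNPS EXC instruction), callers use cmpxchg() in the usual optimistic retry loop. A usage sketch (not code from this patch) built only on the atomic_read()/atomic_cmpxchg() defined above:

static inline int atomic_inc_if_positive_sketch(atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		if (old <= 0)
			return 0;		/* no update */
		new = old + 1;
	} while (atomic_cmpxchg(v, old, new) != old);	/* raced: retry */

	return 1;				/* incremented */
}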
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 1d8f57cd6..14c310f2e 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -36,6 +36,10 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#include <plat/ctop.h>
+#endif
+
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
@@ -72,8 +76,8 @@
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whacko (some small value which looks like
* USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack
@@ -296,11 +300,13 @@
bic \reg, sp, (THREAD_SIZE - 1)
.endm
+#ifndef CONFIG_ARC_PLAT_EZNPS
/* Get CPU-ID of this core */
.macro GET_CPU_ID reg
lr \reg, [identity]
lsr \reg, \reg, 8
bmsk \reg, \reg, 7
.endm
+#endif
#endif /* __ASM_ARC_ENTRY_COMPACT_H */
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 7afe3356b..317ff773e 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -61,8 +61,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
-#define has_transparent_hugepage() 1
-
/* Generic variants assume pgtable_t is struct page *, hence need for these */
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 49014f0ef..c0fa0d2de 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -13,21 +13,14 @@
#define NR_IRQS 128 /* allow some CPU external IRQ handling */
/* Platform Independent IRQs */
-#ifdef CONFIG_ISA_ARCOMPACT
-#define TIMER0_IRQ 3
-#define TIMER1_IRQ 4
-#else
-#define TIMER0_IRQ 16
-#define TIMER1_IRQ 17
+#ifdef CONFIG_ISA_ARCV2
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
#endif
#include <linux/interrupt.h>
#include <asm-generic/irq.h>
extern void arc_init_IRQ(void);
-void arc_local_timer_setup(void);
-void arc_request_percpu_irq(int irq, int cpu,
- irqreturn_t (*isr)(int irq, void *dev),
- const char *irq_nm, void *percpu_dev);
#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef6..b0b87f244 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();
/*
- * Above checke for rollover of 8 bit ASID in 32 bit container.
+ * Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 0d5385488..296c3426a 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -31,7 +31,11 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
* These are used to make use of C type-checking..
*/
typedef struct {
+#ifdef CONFIG_ARC_HAS_PAE40
+ unsigned long long pte;
+#else
unsigned long pte;
+#endif
} pte_t;
typedef struct {
unsigned long pgd;
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 86ed67128..3749234b7 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
__get_order_pte());
return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_pg;
struct page *page;
- pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
if (!pte_pg)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 10d4b8b8e..858f98ef7 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* separate PD0 and PD1, which combined form a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos
@@ -217,7 +217,7 @@
#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD (32 - PGDIR_SHIFT)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
+#define PGDIR_SIZE _BITUL(PGDIR_SHIFT) /* vaddr span, not PGD sz */
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PTE _BITUL(BITS_FOR_PTE)
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 1d694c1ef..16b630fbe 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -57,9 +57,19 @@ struct task_struct;
* A lot of busy-wait loops in SMP are based off of non-volatile data, which
* would otherwise get optimised away by gcc
*/
-#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#ifndef CONFIG_EZNPS_MTM_EXT
-#define cpu_relax_lowlatency() cpu_relax()
+#define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+
+#else
+
+#define cpu_relax() \
+ __asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+
+#define cpu_relax_lowlatency() barrier()
+
+#endif
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
@@ -68,7 +78,7 @@ struct task_struct;
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
@@ -97,7 +107,7 @@ extern unsigned int get_wchan(struct task_struct *p);
#endif /* !__ASSEMBLY__ */
/*
- * System Memory Map on ARC
+ * Default System Memory Map on ARC
*
* ---------------------------- (lower 2G, Translated) -------------------------
* 0x0000_0000 0x5FFF_FFFF (user vaddr: TASK_SIZE)
@@ -109,20 +119,37 @@ extern unsigned int get_wchan(struct task_struct *p);
* 0xC000_0000 0xFFFF_FFFF (peripheral uncached space)
* -----------------------------------------------------------------------------
*/
-#define VMALLOC_START 0x70000000
-/*
- * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
- * See asm/highmem.h for details
- */
-#define VMALLOC_SIZE (PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
-#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+#define TASK_SIZE 0x60000000
-#define USER_KERNEL_GUTTER 0x10000000
+#define VMALLOC_START (PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20))
-#define TASK_SIZE (VMALLOC_START - USER_KERNEL_GUTTER)
+/* 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter (see asm/highmem.h) */
+#define VMALLOC_SIZE ((CONFIG_ARC_KVADDR_SIZE << 20) - PGDIR_SIZE * 4)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+#define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE)
+
+#ifdef CONFIG_ARC_PLAT_EZNPS
+/* NPS architecture defines a special window of 129M in user address space for
+ * special memory areas; when accessing this window the MMU does not use the
+ * TLB. Instead the MMU directs the access to:
+ * 0x57f00000:0x57ffffff -- 1M of closely coupled memory (aka CMEM)
+ * 0x58000000:0x5fffffff -- 16 huge pages, 8M each, with fixed map (aka FMTs)
+ *
+ * CMEM - is the fastest memory we have and its size is 16K.
+ * FMT - is used to map either to internal/external memory.
+ * Internal memory is the second fastest memory and its size is 16M.
+ * External memory is the biggest memory (16G) and also the slowest.
+ *
+ * STACK_TOP needs to be PMD aligned (21 bits), which is why we use 0x57e00000.
+ */
+#define STACK_TOP 0x57e00000
+#else
#define STACK_TOP TASK_SIZE
+#endif
+
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
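The reworked layout can be sanity-checked against the old hardcoded constants. Assuming the common defaults PAGE_OFFSET = 0x80000000 and CONFIG_ARC_KVADDR_SIZE = 256 (MB) -- assumptions for illustration, not values stated in this patch:

#define PAGE_OFFSET		0x80000000UL
#define CONFIG_ARC_KVADDR_SIZE	256

#define VMALLOC_START	(PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20))
/* = 0x80000000 - 0x10000000 = 0x70000000, matching the old hardcoded value */

#define TASK_SIZE		0x60000000
#define USER_KERNEL_GUTTER	(VMALLOC_START - TASK_SIZE)
/* = 0x10000000, so the user/kernel gutter is likewise unchanged */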
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 307846691..48b37c693 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -12,7 +12,11 @@
#include <linux/types.h>
#include <uapi/asm/setup.h>
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#define COMMAND_LINE_SIZE 2048
+#else
#define COMMAND_LINE_SIZE 256
+#endif
/*
* Data structure to map a ID to string
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438..89fdd1b0a 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* (1) These insn were introduced only in the 4.10 release, so for older
* releases support is needed.
*
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index db8c59d1e..cded4a9b5 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
#ifdef CONFIG_ARC_HAS_LLSC
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
smp_mb();
}
-#else /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- " \n" \
- "4: ; --- done --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
-
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * zero means writer holds the lock exclusively, deny Reader.
- * Otherwise grant lock to first/subseq reader
- *
- * if (rw->counter > 0) {
- * rw->counter--;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
- " sub %[val], %[val], 1 \n" /* reader lock */
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
- " sub %[val], %[val], 1 \n" /* counter-- */
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
- * deny writer. Otherwise if unlocked grant to writer
- * Hence the claim that Linux rwlocks are unfair to writers.
- * (can be starved for an indefinite time by readers).
- *
- * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
- * rw->counter = 0;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n"
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter++;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " add %[val], %[val], 1 \n"
- " scond %[val], [%[rwlock]] \n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter))
- : "memory", "cc");
-
- smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " scond %[UNLOCKED], [%[rwlock]]\n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
@@ -610,7 +318,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
+ unsigned long flags;
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
/*
@@ -623,6 +333,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
smp_mb();
return ret;
@@ -632,7 +343,9 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
+ unsigned long flags;
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
/*
@@ -646,6 +359,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
ret = 1;
}
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
return ret;
}
@@ -664,16 +378,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
rw->counter++;
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
}
#endif
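For reference, the deleted ARC_STAR_9000923308 path deferred failed SCOND retries with an exponentially growing busy-wait, so the coherency pipeline could drain before the next exclusive access. The shape of that algorithm in plain C -- a sketch only, assuming 0 = unlocked and 1 = locked, with a CAS builtin modelling the llock/scond pair:

static inline void spin_lock_backoff_sketch(volatile unsigned int *slock)
{
	unsigned int delay = 1;

	while (!__sync_bool_compare_and_swap(slock, 0, 1)) {
		unsigned int t;

		for (t = delay; t; t--)			/* defer the retry */
			__asm__ __volatile__("" ::: "memory");

		delay <<= 1;				/* double it (the 'rol') */
	}
}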
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af674556..2d79e527f 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
* syscall, so all that remains to be tested is _TIF_WORK_MASK
*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da6032b..a78d56708 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.
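Written out, the check that comment describes is just a pair of range comparisons -- a sketch assuming a flat 32-bit layout with TASK_SIZE as the user ceiling:

static inline int user_range_ok_sketch(unsigned long start, unsigned long len)
{
	return (start < TASK_SIZE) && (start + len < TASK_SIZE);
}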
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
index 9da71d415..ea5ca444c 100644
--- a/arch/arc/include/uapi/asm/byteorder.h
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -9,7 +9,7 @@
#ifndef __ASM_ARC_BYTEORDER_H
#define __ASM_ARC_BYTEORDER_H
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#ifdef __BIG_ENDIAN__
#include <linux/byteorder/big_endian.h>
#else
#include <linux/byteorder/little_endian.h>
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a73..71f3918b0 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
__tmp ^ __in; \
})
-#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 39e58d1cd..41fa2ec9e 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -15,6 +15,7 @@
#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
#define _UAPI_ASM_ARC_UNISTD_H
+#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK