Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild                    |   1
-rw-r--r--  arch/arm/include/asm/assembler.h               |  64
-rw-r--r--  arch/arm/include/asm/atomic.h                  |  51
-rw-r--r--  arch/arm/include/asm/barrier.h                 |  17
-rw-r--r--  arch/arm/include/asm/bitops.h                  |  24
-rw-r--r--  arch/arm/include/asm/bug.h                     |   1
-rw-r--r--  arch/arm/include/asm/cacheflush.h              |  21
-rw-r--r--  arch/arm/include/asm/cmpxchg.h                 |  47
-rw-r--r--  arch/arm/include/asm/dma-mapping.h             |  70
-rw-r--r--  arch/arm/include/asm/domain.h                  |  57
-rw-r--r--  arch/arm/include/asm/fixmap.h                  |  15
-rw-r--r--  arch/arm/include/asm/futex.h                   |  19
-rw-r--r--  arch/arm/include/asm/glue-cache.h              |   2
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h         |   2
-rw-r--r--  arch/arm/include/asm/hw_irq.h                  |   6
-rw-r--r--  arch/arm/include/asm/irq.h                     |  10
-rw-r--r--  arch/arm/include/asm/jump_label.h              |  25
-rw-r--r--  arch/arm/include/asm/kvm_host.h                |  15
-rw-r--r--  arch/arm/include/asm/mach/irq.h                |   4
-rw-r--r--  arch/arm/include/asm/mach/pci.h                |   5
-rw-r--r--  arch/arm/include/asm/memory.h                  |   6
-rw-r--r--  arch/arm/include/asm/outercache.h              |  17
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h    |   1
-rw-r--r--  arch/arm/include/asm/pmu.h                     | 154
-rw-r--r--  arch/arm/include/asm/psci.h                    |  23
-rw-r--r--  arch/arm/include/asm/smp.h                     |   2
-rw-r--r--  arch/arm/include/asm/smp_plat.h                |   9
-rw-r--r--  arch/arm/include/asm/switch_to.h               |   5
-rw-r--r--  arch/arm/include/asm/thread_info.h             |  24
-rw-r--r--  arch/arm/include/asm/uaccess.h                 | 132
-rw-r--r--  arch/arm/include/asm/unistd.h                  |   2
-rw-r--r--  arch/arm/include/asm/xen/events.h              |   6
-rw-r--r--  arch/arm/include/asm/xen/page.h                |  44
33 files changed, 404 insertions, 477 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc166..be648eb47 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
-generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279..b2bc8e114 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
.endm
#endif
- .macro asm_trace_hardirqs_off
+ .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl trace_hardirqs_off
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on_cond, cond
+ .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
/*
* actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered
*/
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl\cond trace_hardirqs_on
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on
- asm_trace_hardirqs_on_cond al
- .endm
-
- .macro disable_irq
+ .macro disable_irq, save=1
disable_irq_notrace
- asm_trace_hardirqs_off
+ asm_trace_hardirqs_off \save
.endm
.macro enable_irq
@@ -173,7 +177,7 @@
.macro restore_irqs, oldcpsr
tst \oldcpsr, #PSR_I_BIT
- asm_trace_hardirqs_on_cond eq
+ asm_trace_hardirqs_on cond=eq
restore_irqs_notrace \oldcpsr
.endm
@@ -445,6 +449,48 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ mrc p15, 0, \tmp, c3, c0, 0
+ str \tmp, [sp, #S_FRAME_SIZE]
+#endif
+ .endm
+
+ .macro uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ ldr r0, [sp, #S_FRAME_SIZE]
+ mcr p15, 0, r0, c3, c0, 0
+#endif
+ .endm
+
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c11970..fe3ef397f 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
- smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
\
- smp_mb(); \
- \
return result; \
}
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
- smp_mb();
prefetchw(&ptr->counter);
do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
- smp_mb();
-
return oldval;
}
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
@@ -194,6 +192,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -290,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long \
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
long long result; \
unsigned long tmp; \
\
- smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -309,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
\
- smp_mb(); \
- \
return result; \
}
@@ -321,17 +324,26 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or, orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
long long oldval;
unsigned long res;
- smp_mb();
prefetchw(&ptr->counter);
do {
@@ -346,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
: "cc");
} while (res);
- smp_mb();
-
return oldval;
}
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
long long result;
unsigned long tmp;
- smp_mb();
prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
: "r" (&ptr->counter), "r" (new)
: "cc");
- smp_mb();
-
return result;
}
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
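
The _relaxed operations above drop both smp_mb() calls; full ordering is restored by the generic <linux/atomic.h> layer, which wraps whatever relaxed form an architecture provides. A sketch of that wrapper, per this era's generic macros (illustrative, not part of this patch):

	#define __atomic_op_fence(op, args...)				\
	({								\
		typeof(op##_relaxed(args)) __ret;			\
		smp_mb__before_atomic();				\
		__ret = op##_relaxed(args);				\
		smp_mb__after_atomic();					\
		__ret;							\
	})

	/* e.g. atomic_add_return() falls back to: */
	#ifndef atomic_add_return
	#define atomic_add_return(...)					\
		__atomic_op_fence(atomic_add_return, __VA_ARGS__)
	#endif
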
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e1c..3ff5642d9 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
-#include <asm/outercache.h>
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
@@ -37,12 +36,20 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb() do { dsb(); outer_sync(); } while (0)
+#define mb() __arm_heavy_mb()
#define rmb() dsb()
-#define wmb() do { dsb(st); outer_sync(); } while (0)
+#define wmb() __arm_heavy_mb(st)
#define dma_rmb() dmb(osh)
#define dma_wmb() dmb(oshst)
#else
@@ -67,12 +74,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
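
mb() and wmb() now funnel through __arm_heavy_mb(), which chains an optional SoC-specific barrier after the dsb. A sketch of the out-of-line pieces this header expects (the real definitions live in arch/arm/mm/flush.c; shown for illustration):

	void (*soc_mb)(void);

	void arm_heavy_mb(void)
	{
	#ifdef CONFIG_OUTER_CACHE_SYNC
		if (outer_cache.sync)
			outer_cache.sync();
	#endif
		if (soc_mb)
			soc_mb();
	}

This is also why the <asm/outercache.h> include and the open-coded outer_sync() calls can go: the outer-cache sync is folded into arm_heavy_mb().
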
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 56380995f..e943e6cee 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
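
The conversions above are behavior-preserving: with BITS_PER_LONG == 32 on ARM, the generic <linux/bitops.h> helpers reduce to exactly the open-coded shifts they replace:

	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* == 1UL << (nr & 31) */
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)		/* == nr >> 5 */
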
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index b274bde24..e7335a921 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -40,6 +40,7 @@ do { \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
".pushsection __bug_table,\"a\"\n" \
+ ".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
".popsection"); \
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda8f..d5525bfc7 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side. This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+ phys_addr_t phys = __pa(addr);
+
+ __cpuc_flush_dcache_area((void *)addr, size);
+ outer_flush_range(phys, phys + size);
+}
+
#endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05d3..916a2744d 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
- smp_mb();
prefetchw((const void *)ptr);
switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
- smp_mb();
return ret;
}
-#define xchg(ptr, x) ({ \
+#define xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
+#define xchg xchg_relaxed
+
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long ret;
-
- smp_mb();
- ret = __cmpxchg(ptr, old, new, size);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({ \
- (__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))); \
+#define cmpxchg_relaxed(ptr,o,n) ({ \
+ (__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
})
static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- unsigned long long ret;
-
- smp_mb();
- ret = __cmpxchg64(ptr, old, new);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({ \
- (__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)); \
-})
-
#endif /* __LINUX_ARM_ARCH__ >= 6 */
#endif /* __ASM_ARM_CMPXCHG_H */
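
With __cmpxchg_mb() and __cmpxchg64_mb() removed, the fully-ordered cmpxchg() and cmpxchg64() are supplied by the generic layer, which brackets the arch's _relaxed form with barriers just as for the atomics above; roughly:

	/* generic fallback (sketch) -- only used when the arch defines the
	 * relaxed form but not the ordered one: */
	#ifndef cmpxchg
	#define cmpxchg(...)	__atomic_op_fence(cmpxchg, __VA_ARGS__)
	#endif
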
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d37..ccb3aa646 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -8,13 +8,12 @@
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
-#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~0)
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
@@ -39,12 +38,15 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
-#include <asm-generic/dma-mapping-common.h>
+#define HAVE_ARCH_DMA_SUPPORTED 1
+extern int dma_supported(struct device *dev, u64 mask);
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
+/*
+ * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
+ * implementations, we don't provide a dma_cache_sync function so drivers using
+ * this API are highlighted with build warnings.
+ */
+#include <asm-generic/dma-mapping-common.h>
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
@@ -167,32 +169,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline void dma_mark_clean(void *addr, size_t size) { }
-/*
- * DMA errors are defined by all-bits-set in the DMA address.
- */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
- return dma_addr == DMA_ERROR_CODE;
-}
-
-/*
- * Dummy noncoherent implementation. We don't provide a dma_cache_sync
- * function so drivers using this API are highlighted with build warnings.
- */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
-{
- return NULL;
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
-{
-}
-
-extern int dma_supported(struct device *dev, u64 mask);
-
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
/**
@@ -209,21 +185,6 @@ extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs);
-#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
- BUG_ON(!ops);
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
-
/**
* arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -241,19 +202,6 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs);
-#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
/**
* arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe4464..fc8ba1663 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
+#include <asm/thread_info.h>
#endif
/*
@@ -34,15 +35,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
-#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
-#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
+#define DOMAIN_VECTORS 3
/*
* Domain types
@@ -55,30 +55,65 @@
#define DOMAIN_MANAGER 1
#endif
-#define domain_val(dom,type) ((type) << (2*(dom)))
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain)
+ : "m" (current_thread_info()->cpu_domain));
+
+ return domain;
+}
+
static inline void set_domain(unsigned val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
- : : "r" (val));
+ : : "r" (val) : "memory");
isb();
}
+#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
+ unsigned int domain = get_domain(); \
+ domain &= ~domain_mask(dom); \
+ domain = domain | domain_val(dom, type); \
+ set_domain(domain); \
} while (0)
#else
-static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
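
A worked value, assuming the non-CONFIG_IO_36 domain numbers above and the architectural access encodings DOMAIN_NOACCESS = 0 and DOMAIN_CLIENT = 1 (each domain takes two bits of the DACR):

	/* DACR_UACCESS_DISABLE by hand:
	 *   DOMAIN_KERNEL  (dom 0, client)    -> 0x1 << 0
	 *   DOMAIN_USER    (dom 1, no access) -> 0x0 << 2
	 *   DOMAIN_IO      (dom 2, client)    -> 0x1 << 4
	 *   DOMAIN_VECTORS (dom 3, client)    -> 0x1 << 6
	 *   = 0x51
	 * DACR_UACCESS_ENABLE adds user = client (0x1 << 2), giving 0x55. */
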
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae1d..58cfe9f1a 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
enum fixed_addresses {
- FIX_KMAP_BEGIN,
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
+#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809..6795368ad 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
smp_mb(); \
prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret;
u32 val;
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret = 0;
u32 val;
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
*uval = val;
preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd5b..cab07f693 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index d36a73d7c..076777ff3 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address;
struct pci_dev;
struct pci_sys_data;
-extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
+extern void it8152_irq_demux(struct irq_desc *desc);
extern void it8152_init_irq(void);
extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index af79da40a..9beb92914 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq)
pr_crit("unexpected IRQ trap at vector %02x\n", irq);
}
-void set_irq_flags(unsigned int irq, unsigned int flags);
-
-#define IRQF_VALID (1 << 0)
-#define IRQF_PROBE (1 << 1)
-#define IRQF_NOAUTOEN (1 << 2)
-
#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
#endif
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 53c15dec7..1bd9510de 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -35,6 +35,16 @@ extern void (*handle_arch_irq)(struct pt_regs *);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
+#ifdef CONFIG_SMP
+extern void arch_trigger_all_cpu_backtrace(bool);
+#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
+#endif
+
+static inline int nr_legacy_irqs(void)
+{
+ return NR_IRQS_LEGACY;
+}
+
#endif
#endif
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5f337dc5c..34f7b6980 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,23 +4,32 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/unified.h>
#define JUMP_LABEL_NOP_SIZE 4
-#ifdef CONFIG_THUMB2_KERNEL
-#define JUMP_LABEL_NOP "nop.w"
-#else
-#define JUMP_LABEL_NOP "nop"
-#endif
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ WASM(nop) "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
- JUMP_LABEL_NOP "\n\t"
+ WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
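
These two entry points implement the reworked static-key ABI, where the branch argument selects whether the inline code defaults to a nop or to an unconditional branch. A usage sketch, assuming the companion DEFINE_STATIC_KEY_FALSE/static_branch_unlikely/static_branch_enable helpers from <linux/jump_label.h> in the same series (use_fast_path and fast_path() are placeholders):

	#include <linux/jump_label.h>

	extern void fast_path(void);

	static DEFINE_STATIC_KEY_FALSE(use_fast_path);

	void hot_path(void)
	{
		if (static_branch_unlikely(&use_fast_path))	/* a nop by default... */
			fast_path();	/* ...patched to a real branch when enabled */
	}

	/* elsewhere, e.g. at init time: */
	/* static_branch_enable(&use_fast_path); */
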
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index e896d2c19..c4072d9f3 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -29,21 +29,18 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
+#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_VCPU_MAX_FEATURES 2
#include <kvm/arm_vgic.h>
+#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -148,6 +145,7 @@ struct kvm_vm_stat {
struct kvm_vcpu_stat {
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
};
@@ -231,4 +229,9 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arm_init_debug(void) {}
+static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 2092ee1e1..de4634b51 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int);
/*
* This is for easy migration, but should be changed in the source
*/
-#define do_bad_IRQ(irq,desc) \
+#define do_bad_IRQ(desc) \
do { \
raw_spin_lock(&desc->lock); \
- handle_bad_irq(irq, desc); \
+ handle_bad_irq(desc); \
raw_spin_unlock(&desc->lock); \
} while(0)
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 28b9bb359..8857d2869 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -19,9 +19,7 @@ struct pci_bus;
struct device;
struct hw_pci {
-#ifdef CONFIG_PCI_MSI
struct msi_controller *msi_ctrl;
-#endif
struct pci_ops *ops;
int nr_controllers;
void **private_data;
@@ -42,9 +40,6 @@ struct hw_pci {
* Per-controller structure
*/
struct pci_sys_data {
-#ifdef CONFIG_PCI_MSI
- struct msi_controller *msi_ctrl;
-#endif
struct list_head node;
int busnr; /* primary bus number */
u64 mem_offset; /* bus->cpu memory mapping offset */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b7f6fb462..98d58bb04 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -119,12 +119,6 @@
#endif
/*
- * Convert a physical address to a Page Frame Number and back
- */
-#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
-#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
-
-/*
* Convert a page to/from a physical address
*/
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92fc2..c2bf24f40 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
#endif
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
- if (outer_cache.sync)
- outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
#endif /* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e9..d0131ee6f 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index 3fc87dfd7..000000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * linux/arch/arm/include/asm/pmu.h
- *
- * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __ARM_PMU_H__
-#define __ARM_PMU_H__
-
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-
-#include <asm/cputype.h>
-
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- * interrupt and passed the address of the low level handler,
- * and can be used to implement any platform specific handling
- * before or after calling it.
- */
-struct arm_pmu_platdata {
- irqreturn_t (*handle_irq)(int irq, void *dev,
- irq_handler_t pmu_handler);
-};
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS 32
-
-#define HW_OP_UNSUPPORTED 0xFFFF
-#define C(_x) PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED 0xFFFF
-
-#define PERF_MAP_ALL_UNSUPPORTED \
- [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
-
-#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
-[0 ... C(MAX) - 1] = { \
- [0 ... C(OP_MAX) - 1] = { \
- [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
- }, \
-}
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
- /*
- * The events that are active on the PMU for the given index.
- */
- struct perf_event *events[ARMPMU_MAX_HWEVENTS];
-
- /*
- * A 1 bit for an index indicates that the counter is being used for
- * an event. A 0 means that the counter can be used.
- */
- DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
-
- /*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
- * When using percpu IRQs, we need a percpu dev_id. Place it here as we
- * already have to allocate this struct per cpu.
- */
- struct arm_pmu *percpu_pmu;
-};
-
-struct arm_pmu {
- struct pmu pmu;
- cpumask_t active_irqs;
- cpumask_t supported_cpus;
- int *irq_affinity;
- char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
- void (*enable)(struct perf_event *event);
- void (*disable)(struct perf_event *event);
- int (*get_event_idx)(struct pmu_hw_events *hw_events,
- struct perf_event *event);
- void (*clear_event_idx)(struct pmu_hw_events *hw_events,
- struct perf_event *event);
- int (*set_event_filter)(struct hw_perf_event *evt,
- struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
- void (*start)(struct arm_pmu *);
- void (*stop)(struct arm_pmu *);
- void (*reset)(void *);
- int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
- void (*free_irq)(struct arm_pmu *);
- int (*map_event)(struct perf_event *event);
- int num_events;
- atomic_t active_events;
- struct mutex reserve_mutex;
- u64 max_period;
- struct platform_device *plat_device;
- struct pmu_hw_events __percpu *hw_events;
- struct notifier_block hotplug_nb;
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-int armpmu_register(struct arm_pmu *armpmu, int type);
-
-u64 armpmu_event_update(struct perf_event *event);
-
-int armpmu_event_set_period(struct perf_event *event);
-
-int armpmu_map_event(struct perf_event *event,
- const unsigned (*event_map)[PERF_COUNT_HW_MAX],
- const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX],
- u32 raw_event_mask);
-
-struct pmu_probe_info {
- unsigned int cpuid;
- unsigned int mask;
- int (*init)(struct arm_pmu *);
-};
-
-#define PMU_PROBE(_cpuid, _mask, _fn) \
-{ \
- .cpuid = (_cpuid), \
- .mask = (_mask), \
- .init = (_fn), \
-}
-
-#define ARM_PMU_PROBE(_cpuid, _fn) \
- PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
-
-#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
-
-#define XSCALE_PMU_PROBE(_version, _fn) \
- PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
-
-int arm_pmu_device_probe(struct platform_device *pdev,
- const struct of_device_id *of_table,
- const struct pmu_probe_info *probe_table);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3ec6..68ee3ce17 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
#ifndef __ASM_ARM_PSCI_H
#define __ASM_ARM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
- int (*affinity_info)(unsigned long target_affinity,
- unsigned long lowest_affinity_level);
- int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
extern struct smp_operations psci_smp_ops;
#ifdef CONFIG_ARM_PSCI
-int psci_init(void);
bool psci_smp_available(void);
#else
-static inline int psci_init(void) { return 0; }
static inline bool psci_smp_available(void) { return false; }
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1ba6..ef356659b 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_kill)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
#endif
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e5224d..f9080717f 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
extern int platform_can_secondary_boot(void);
extern int platform_can_cpu_hotplug(void);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e25946..12ebfcc1d 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,9 @@
* CPU.
*/
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev) dsb(ish)
+#define __complete_pending_tlbi() dsb(ish)
+#else
+#define __complete_pending_tlbi()
#endif
/*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
+ __complete_pending_tlbi(); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3..776757d16 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -25,7 +25,6 @@
struct task_struct;
#include <asm/types.h>
-#include <asm/domain.h>
typedef unsigned long mm_segment_t;
@@ -74,9 +73,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -136,22 +132,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* thread information flags:
- * TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCAL_AUDIT - syscall auditing active
- * TIF_SIGPENDING - signal pending
- * TIF_NEED_RESCHED - rescheduling necessary
- * TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
-#define TIF_SIGPENDING 0
-#define TIF_NEED_RESCHED 1
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_UPROBE 7
-#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
+#define TIF_UPROBE 3 /* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09e..8cc85a4eb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g. via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+#else
+ return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
+#endif
+}
+
+/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
*/
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
break; \
default: __e = __put_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
__e; \
})
@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
+ unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_asm_byte(x, addr, err) \
+#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(ldrb) " %1,[%2],#0\n" \
+ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -329,6 +365,9 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
+#define __get_user_asm_byte(x, addr, err) \
+ __get_user_asm(x, addr, err, ldrb)
+
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
@@ -348,22 +387,7 @@ do { \
#endif
#define __get_user_asm_word(x, addr, err) \
- __asm__ __volatile__( \
- "1: " TUSER(ldr) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .text.fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " mov %1, #0\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err), "=&r" (x) \
- : "r" (addr), "i" (-EFAULT) \
- : "cc")
+ __get_user_asm(x, addr, err, ldr)
#define __put_user(x, ptr) \
({ \
@@ -381,9 +405,11 @@ do { \
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
+ unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
@@ -391,11 +417,12 @@ do { \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
} while (0)
-#define __put_user_asm_byte(x, __pu_addr, err) \
+#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(strb) " %1,[%2],#0\n" \
+ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -410,6 +437,9 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
+#define __put_user_asm_byte(x, __pu_addr, err) \
+ __put_user_asm(x, __pu_addr, err, strb)
+
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
@@ -427,21 +457,7 @@ do { \
#endif
#define __put_user_asm_word(x, __pu_addr, err) \
- __asm__ __volatile__( \
- "1: " TUSER(str) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .text.fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err) \
- : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
- : "cc")
+ __put_user_asm(x, __pu_addr, err, str)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -474,11 +490,46 @@ do { \
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
+/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
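
Under CONFIG_CPU_SW_DOMAIN_PAN, every sanctioned accessor above opens the user-domain window only for the duration of the access, so a stray direct dereference of a user pointer from kernel code now takes a domain fault instead of silently succeeding. A sketch of the failure mode versus the correct pattern (broken()/correct() are illustrative only):

	int broken(const int __user *p)
	{
		return *p;	/* BAD: user domain is "no access" -> domain fault */
	}

	int correct(const int __user *p)
	{
		int v;

		/* get_user() brackets the load with
		 * uaccess_save_and_enable()/uaccess_restore() itself: */
		return get_user(v, p) ? -EFAULT : v;
	}
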
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 32640c431..7cba573c2 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
-#define __NR_syscalls (388)
+#define __NR_syscalls (392)
/*
* *NOTE*: This is a ghost syscall private to the kernel. Only the
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 8b1f37bfe..71e473d05 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -20,4 +20,10 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
atomic64_t, \
counter), (val))
+/* Rebinding an event channel is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+ return true;
+}
+
#endif /* _ASM_ARM_XEN_EVENTS_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 1bee8ca12..127956353 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -34,7 +34,19 @@ typedef struct xpaddr {
unsigned long __pfn_to_mfn(unsigned long pfn);
extern struct rb_root phys_to_mach;
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+ return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+ return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
{
unsigned long mfn;
@@ -47,33 +59,21 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
return pfn;
}
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
{
- return mfn;
+ return bfn;
}
-#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
-static inline xmaddr_t phys_to_machine(xpaddr_t phys)
-{
- unsigned offset = phys.paddr & ~PAGE_MASK;
- return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
-}
-
-static inline xpaddr_t machine_to_phys(xmaddr_t machine)
-{
- unsigned offset = machine.maddr & ~PAGE_MASK;
- return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
-}
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << PAGE_SHIFT))
+/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
- /* TODO: assuming it is mapped in the kernel 1:1 */
- return virt_to_machine(vaddr);
+ BUG();
}
/* TODO: this shouldn't be here but it is because the frontend drivers
@@ -108,7 +108,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
bool xen_arch_need_swiotlb(struct device *dev,
unsigned long pfn,
- unsigned long mfn);
+ unsigned long bfn);
unsigned long xen_get_swiotlb_free_pages(unsigned int order);
#endif /* _ASM_ARM_XEN_PAGE_H */