author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
commit | 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree | fa581f6dc1c0596391690d1f67eceef3af8246dc /arch/arm/include
parent | d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'arch/arm/include')
24 files changed, 347 insertions, 268 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index bd425302c..16da6380e 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += bitsperlong.h
 generic-y += cputime.h
 generic-y += current.h
+generic-y += early_ioremap.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 7da5503c0..e08d15184 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void)
        u32 irqstat;

        asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+       dsb(sy);
        return irqstat;
 }
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 3ff5642d9..112cc1a5d 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -60,38 +60,11 @@ extern void arm_heavy_mb(void);
 #define dma_wmb()      barrier()
 #endif

-#ifndef CONFIG_SMP
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#else
-#define smp_mb()       dmb(ish)
-#define smp_rmb()      smp_mb()
-#define smp_wmb()      dmb(ishst)
-#endif
-
-#define smp_store_release(p, v)                                        \
-do {                                                                   \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
-#define smp_load_acquire(p)                                            \
-({                                                                     \
-       typeof(*p) ___p1 = READ_ONCE(*p);                               \
-       compiletime_assert_atomic_type(*p);                             \
-       smp_mb();                                                       \
-       ___p1;                                                          \
-})
-
-#define read_barrier_depends()         do { } while(0)
-#define smp_read_barrier_depends()     do { } while(0)
-
-#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define __smp_mb()     dmb(ish)
+#define __smp_rmb()    __smp_mb()
+#define __smp_wmb()    dmb(ishst)

-#define smp_mb__before_atomic()        smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#include <asm-generic/barrier.h>

 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
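Note: barrier.h now defines only the ARM-specific __smp_mb()/__smp_rmb()/__smp_wmb() primitives and lets asm-generic/barrier.h derive smp_store_release()/smp_load_acquire() instead of open-coding them. The ordering those two macros provide corresponds to C11 release/acquire semantics; a minimal standalone sketch, illustration only and not kernel code:

    /*
     * Standalone C11 analogue of the store-release/load-acquire
     * pairing that asm-generic/barrier.h derives from __smp_mb().
     */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ready;   /* flag published with release ordering */
    static int payload;        /* plain data guarded by the flag       */

    static void producer(void)
    {
        payload = 42;                                            /* plain store */
        atomic_store_explicit(&ready, 1, memory_order_release);  /* publish     */
    }

    static int consumer(void)
    {
        if (atomic_load_explicit(&ready, memory_order_acquire))  /* observe */
            return payload;                     /* guaranteed to see 42     */
        return -1;
    }

    int main(void)
    {
        producer();
        printf("%d\n", consumer());
        return 0;
    }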
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index e7335a921..4e6e88a6b 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -5,8 +5,6 @@
 #include <linux/types.h>
 #include <asm/opcodes.h>

-#ifdef CONFIG_BUG
-
 /*
  * Use a suitable undefined instruction to use for ARM/Thumb2 bug handling.
  * We need to be careful not to conflict with those used by other modules and
@@ -47,7 +45,7 @@ do {                                                           \
        unreachable();                                          \
 } while (0)

-#else  /* not CONFIG_DEBUG_BUGVERBOSE */
+#else

 #define __BUG(__file, __line, __value)                         \
 do {                                                           \
@@ -57,7 +55,6 @@ do {                                                           \
 #endif  /* CONFIG_DEBUG_BUGVERBOSE */

 #define HAVE_ARCH_BUG
-#endif  /* CONFIG_BUG */

 #include <asm-generic/bug.h>
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0f8424924..3848259be 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -30,7 +30,7 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 struct device_node;

 struct cpuidle_ops {
-       int (*suspend)(int cpu, unsigned long arg);
+       int (*suspend)(unsigned long arg);
        int (*init)(struct device_node *, int cpu);
 };
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 85e374f87..b23c6c81c 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -228,10 +228,26 @@ static inline int cpu_is_xsc3(void)
 }
 #endif

-#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
-#define cpu_is_xscale()        0
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) && \
+    !defined(CONFIG_CPU_MOHAWK)
+#define cpu_is_xscale_family() 0
 #else
-#define cpu_is_xscale()        1
+static inline int cpu_is_xscale_family(void)
+{
+       unsigned int id;
+       id = read_cpuid_id() & 0xffffe000;
+
+       switch (id) {
+       case 0x69052000: /* Intel XScale 1 */
+       case 0x69054000: /* Intel XScale 2 */
+       case 0x69056000: /* Intel XScale 3 */
+       case 0x56056000: /* Marvell XScale 3 */
+       case 0x56158000: /* Marvell Mohawk */
+               return 1;
+       }
+
+       return 0;
+}
 #endif

 /*
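Note: the new cpu_is_xscale_family() masks off the low part-number bits of the CPU ID register with 0xffffe000 so a single check covers Intel XScale 1–3, Marvell XScale 3 and Marvell Mohawk. A standalone sketch of the mask-and-match logic; read_cpuid_id() is faked here with a hypothetical MIDR value, on real hardware it reads coprocessor register MIDR:

    #include <stdio.h>

    static unsigned int fake_midr = 0x69054117; /* hypothetical XScale 2 MIDR */

    static unsigned int read_cpuid_id(void) { return fake_midr; }

    static int cpu_is_xscale_family(void)
    {
        switch (read_cpuid_id() & 0xffffe000) {  /* drop revision/part bits */
        case 0x69052000: /* Intel XScale 1 */
        case 0x69054000: /* Intel XScale 2 */
        case 0x69056000: /* Intel XScale 3 */
        case 0x56056000: /* Marvell XScale 3 */
        case 0x56158000: /* Marvell Mohawk */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        printf("xscale family: %d\n", cpu_is_xscale_family());
        return 0;
    }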
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 662c7bd06..e1f07764b 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -5,9 +5,9 @@
 #include <asm/compiler.h>

 /*
- * The semantics of do_div() are:
+ * The semantics of __div64_32() are:
  *
- * uint32_t do_div(uint64_t *n, uint32_t base)
+ * uint32_t __div64_32(uint64_t *n, uint32_t base)
  * {
  *     uint32_t remainder = *n % base;
  *     *n = *n / base;
@@ -16,8 +16,9 @@
  *
  * In other words, a 64-bit dividend with a 32-bit divisor producing
  * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
- * we call a special __do_div64 helper with completely non standard
- * calling convention for arguments and results (beware).
+ * we override the generic version in lib/div64.c to call our __do_div64
+ * assembly implementation with completely non standard calling convention
+ * for arguments and results (beware).
  */

 #ifdef __ARMEB__
@@ -28,199 +29,101 @@
 #define __xh "r1"
 #endif

-#define __do_div_asm(n, base)                                  \
-({                                                             \
-       register unsigned int __base      asm("r4") = base;     \
-       register unsigned long long __n   asm("r0") = n;        \
-       register unsigned long long __res asm("r2");            \
-       register unsigned int __rem       asm(__xh);            \
-       asm(    __asmeq("%0", __xh)                             \
-               __asmeq("%1", "r2")                             \
-               __asmeq("%2", "r0")                             \
-               __asmeq("%3", "r4")                             \
-               "bl     __do_div64"                             \
-               : "=r" (__rem), "=r" (__res)                    \
-               : "r" (__n), "r" (__base)                       \
-               : "ip", "lr", "cc");                            \
-       n = __res;                                              \
-       __rem;                                                  \
-})
-
-#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
+static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+       register unsigned int __base      asm("r4") = base;
+       register unsigned long long __n   asm("r0") = *n;
+       register unsigned long long __res asm("r2");
+       register unsigned int __rem       asm(__xh);
+       asm(    __asmeq("%0", __xh)
+               __asmeq("%1", "r2")
+               __asmeq("%2", "r0")
+               __asmeq("%3", "r4")
+               "bl     __do_div64"
+               : "=r" (__rem), "=r" (__res)
+               : "r" (__n), "r" (__base)
+               : "ip", "lr", "cc");
+       *n = __res;
+       return __rem;
+}
+#define __div64_32 __div64_32
+
+#if !defined(CONFIG_AEABI)

 /*
- * gcc versions earlier than 4.0 are simply too problematic for the
- * optimized implementation below. First there is gcc PR 15089 that
- * tend to trig on more complex constructs, spurious .global __udivsi3
- * are inserted even if none of those symbols are referenced in the
- * generated code, and those gcc versions are not able to do constant
- * propagation on long long values anyway.
+ * In OABI configurations, some uses of the do_div function
+ * cause gcc to run out of registers. To work around that,
+ * we can force the use of the out-of-line version for
+ * configurations that build an OABI kernel.
 */
-#define do_div(n, base) __do_div_asm(n, base)
-
-#elif __GNUC__ >= 4
+#define do_div(n, base) __div64_32(&(n), base)

-#include <asm/bug.h>
+#else

 /*
- * If the divisor happens to be constant, we determine the appropriate
- * inverse at compile time to turn the division into a few inline
- * multiplications instead which is much faster. And yet only if compiling
- * for ARMv4 or higher (we need umull/umlal) and if the gcc version is
- * sufficiently recent to perform proper long long constant propagation.
- * (It is unfortunate that gcc doesn't perform all this internally.)
+ * gcc versions earlier than 4.0 are simply too problematic for the
+ * __div64_const32() code in asm-generic/div64.h. First there is
+ * gcc PR 15089 that tend to trig on more complex constructs, spurious
+ * .global __udivsi3 are inserted even if none of those symbols are
+ * referenced in the generated code, and those gcc versions are not able
+ * to do constant propagation on long long values anyway.
 */
-#define do_div(n, base)                                                \
-({                                                             \
-       unsigned int __r, __b = (base);                         \
-       if (!__builtin_constant_p(__b) || __b == 0 ||           \
-           (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \
-               /* non-constant divisor (or zero): slow path */ \
-               __r = __do_div_asm(n, __b);                     \
-       } else if ((__b & (__b - 1)) == 0) {                    \
-               /* Trivial: __b is constant and a power of 2 */ \
-               /* gcc does the right thing with this code.  */ \
-               __r = n;                                        \
-               __r &= (__b - 1);                               \
-               n /= __b;                                       \
-       } else {                                                \
-               /* Multiply by inverse of __b: n/b = n*(p/b)/p */ \
-               /* We rely on the fact that most of this code gets */ \
-               /* optimized away at compile time due to constant */ \
-               /* propagation and only a couple inline assembly */ \
-               /* instructions should remain. Better avoid any */ \
-               /* code construct that might prevent that. */   \
-               unsigned long long __res, __x, __t, __m, __n = n; \
-               unsigned int __c, __p, __z = 0;                 \
-               /* preserve low part of n for reminder computation */ \
-               __r = __n;                                      \
-               /* determine number of bits to represent __b */ \
-               __p = 1 << __div64_fls(__b);                    \
-               /* compute __m = ((__p << 64) + __b - 1) / __b */ \
-               __m = (~0ULL / __b) * __p;                      \
-               __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \
-               /* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \
-               __x = ~0ULL / __b * __b - 1;                    \
-               __res = (__m & 0xffffffff) * (__x & 0xffffffff); \
-               __res >>= 32;                                   \
-               __res += (__m & 0xffffffff) * (__x >> 32);      \
-               __t = __res;                                    \
-               __res += (__x & 0xffffffff) * (__m >> 32);      \
-               __t = (__res < __t) ? (1ULL << 32) : 0;         \
-               __res = (__res >> 32) + __t;                    \
-               __res += (__m >> 32) * (__x >> 32);             \
-               __res /= __p;                                   \
-               /* Now sanitize and optimize what we've got. */ \
-               if (~0ULL % (__b / (__b & -__b)) == 0) {        \
-                       /* those cases can be simplified with: */ \
-                       __n /= (__b & -__b);                    \
-                       __m = ~0ULL / (__b / (__b & -__b));     \
-                       __p = 1;                                \
-                       __c = 1;                                \
-               } else if (__res != __x / __b) {                \
-                       /* We can't get away without a correction */ \
-                       /* to compensate for bit truncation errors. */ \
-                       /* To avoid it we'd need an additional bit */ \
-                       /* to represent __m which would overflow it. */ \
-                       /* Instead we do m=p/b and n/b=(n*m+m)/p. */ \
-                       __c = 1;                                \
-                       /* Compute __m = (__p << 64) / __b */   \
-                       __m = (~0ULL / __b) * __p;              \
-                       __m += ((~0ULL % __b + 1) * __p) / __b; \
-               } else {                                        \
-                       /* Reduce __m/__p, and try to clear bit 31 */ \
-                       /* of __m when possible otherwise that'll */ \
-                       /* need extra overflow handling later. */ \
-                       unsigned int __bits = -(__m & -__m);    \
-                       __bits |= __m >> 32;                    \
-                       __bits = (~__bits) << 1;                \
-                       /* If __bits == 0 then setting bit 31 is */ \
-                       /* unavoidable. Simply apply the maximum */ \
-                       /* possible reduction in that case. */  \
-                       /* Otherwise the MSB of __bits indicates the */ \
-                       /* best reduction we should apply. */   \
-                       if (!__bits) {                          \
-                               __p /= (__m & -__m);            \
-                               __m /= (__m & -__m);            \
-                       } else {                                \
-                               __p >>= __div64_fls(__bits);    \
-                               __m >>= __div64_fls(__bits);    \
-                       }                                       \
-                       /* No correction needed. */             \
-                       __c = 0;                                \
-               }                                               \
-               /* Now we have a combination of 2 conditions: */ \
-               /* 1) whether or not we need a correction (__c), and */ \
-               /* 2) whether or not there might be an overflow in */ \
-               /*    the cross product (__m & ((1<<63) | (1<<31))) */ \
-               /* Select the best insn combination to perform the */ \
-               /* actual __m * __n / (__p << 64) operation. */ \
-               if (!__c) {                                     \
-                       asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t" \
-                               "mov    %Q0, #0"                \
-                               : "=&r" (__res)                 \
-                               : "r" (__m), "r" (__n)          \
-                               : "cc" );                       \
-               } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
-                       __res = __m;                            \
-                       asm (   "umlal  %Q0, %R0, %Q1, %Q2\n\t" \
-                               "mov    %Q0, #0"                \
-                               : "+&r" (__res)                 \
-                               : "r" (__m), "r" (__n)          \
-                               : "cc" );                       \
-               } else {                                        \
-                       asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t" \
-                               "cmn    %Q0, %Q1\n\t"           \
-                               "adcs   %R0, %R0, %R1\n\t"      \
-                               "adc    %Q0, %3, #0"            \
-                               : "=&r" (__res)                 \
-                               : "r" (__m), "r" (__n), "r" (__z) \
-                               : "cc" );                       \
-               }                                               \
-               if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {   \
-                       asm (   "umlal  %R0, %Q0, %R1, %Q2\n\t" \
-                               "umlal  %R0, %Q0, %Q1, %R2\n\t" \
-                               "mov    %R0, #0\n\t"            \
-                               "umlal  %Q0, %R0, %R1, %R2"     \
-                               : "+&r" (__res)                 \
-                               : "r" (__m), "r" (__n)          \
-                               : "cc" );                       \
-               } else {                                        \
-                       asm (   "umlal  %R0, %Q0, %R2, %Q3\n\t" \
-                               "umlal  %R0, %1, %Q2, %R3\n\t"  \
-                               "mov    %R0, #0\n\t"            \
-                               "adds   %Q0, %1, %Q0\n\t"       \
-                               "adc    %R0, %R0, #0\n\t"       \
-                               "umlal  %Q0, %R0, %R2, %R3"     \
-                               : "+&r" (__res), "+&r" (__z)    \
-                               : "r" (__m), "r" (__n)          \
-                               : "cc" );                       \
-               }                                               \
-               __res /= __p;                                   \
-               /* The reminder can be computed with 32-bit regs */ \
-               /* only, and gcc is good at that. */            \
-               {                                               \
-                       unsigned int __res0 = __res;            \
-                       unsigned int __b0 = __b;                \
-                       __r -= __res0 * __b0;                   \
-               }                                               \
-               /* BUG_ON(__r >= __b || __res * __b + __r != n); */ \
-               n = __res;                                      \
-       }                                                       \
-       __r;                                                    \
-})
-
-/* our own fls implementation to make sure constant propagation is fine */
-#define __div64_fls(bits)                                      \
-({                                                             \
-       unsigned int __left = (bits), __nr = 0;                 \
-       if (__left & 0xffff0000) __nr += 16, __left >>= 16;     \
-       if (__left & 0x0000ff00) __nr +=  8, __left >>=  8;     \
-       if (__left & 0x000000f0) __nr +=  4, __left >>=  4;     \
-       if (__left & 0x0000000c) __nr +=  2, __left >>=  2;     \
-       if (__left & 0x00000002) __nr +=  1;                    \
-       __nr;                                                   \
-})
+
+#define __div64_const32_is_OK (__GNUC__ >= 4)
+
+static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
+{
+       unsigned long long res;
+       unsigned int tmp = 0;
+
+       if (!bias) {
+               asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
+                       "mov    %Q0, #0"
+                       : "=&r" (res)
+                       : "r" (m), "r" (n)
+                       : "cc");
+       } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+               res = m;
+               asm (   "umlal  %Q0, %R0, %Q1, %Q2\n\t"
+                       "mov    %Q0, #0"
+                       : "+&r" (res)
+                       : "r" (m), "r" (n)
+                       : "cc");
+       } else {
+               asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
+                       "cmn    %Q0, %Q1\n\t"
+                       "adcs   %R0, %R0, %R1\n\t"
+                       "adc    %Q0, %3, #0"
+                       : "=&r" (res)
+                       : "r" (m), "r" (n), "r" (tmp)
+                       : "cc");
+       }
+
+       if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+               asm (   "umlal  %R0, %Q0, %R1, %Q2\n\t"
+                       "umlal  %R0, %Q0, %Q1, %R2\n\t"
+                       "mov    %R0, #0\n\t"
+                       "umlal  %Q0, %R0, %R1, %R2"
+                       : "+&r" (res)
+                       : "r" (m), "r" (n)
+                       : "cc");
+       } else {
+               asm (   "umlal  %R0, %Q0, %R2, %Q3\n\t"
+                       "umlal  %R0, %1, %Q2, %R3\n\t"
+                       "mov    %R0, #0\n\t"
+                       "adds   %Q0, %1, %Q0\n\t"
+                       "adc    %R0, %R0, #0\n\t"
+                       "umlal  %Q0, %R0, %R2, %R3"
+                       : "+&r" (res), "+&r" (tmp)
+                       : "r" (m), "r" (n)
+                       : "cc");
+       }

+       return res;
+}
+#define __arch_xprod_64 __arch_xprod_64
+
+#include <asm-generic/div64.h>

 #endif
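Note: whichever path is taken, the do_div()/__div64_32() contract stays the same: divide the 64-bit dividend in place by a 32-bit divisor and return the 32-bit remainder. In portable C the contract is simply the following (sketch of the semantics only; the kernel routes this through the __do_div64 assembly helper behind a non-standard calling convention):

    #include <stdint.h>
    #include <stdio.h>

    /* Plain-C statement of the __div64_32() contract. */
    static uint32_t div64_32(uint64_t *n, uint32_t base)
    {
        uint32_t remainder = (uint32_t)(*n % base);
        *n /= base;
        return remainder;
    }

    int main(void)
    {
        uint64_t ns = 1000000123ULL;
        uint32_t rem = div64_32(&ns, 1000000000U); /* split into s + ns */
        printf("%llu s, %u ns\n", (unsigned long long)ns, rem);
        return 0;
    }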
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ccb3aa646..6ad1ceda6 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 #define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *dev, u64 mask);

-/*
- * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
- * implementations, we don't provide a dma_cache_sync function so drivers using
- * this API are highlighted with build warnings.
- */
-#include <asm-generic/dma-mapping-common.h>
-
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
new file mode 100644
index 000000000..e0eea72de
--- /dev/null
+++ b/arch/arm/include/asm/efi.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_EFI_H
+#define __ASM_ARM_EFI_H
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/early_ioremap.h>
+#include <asm/fixmap.h>
+#include <asm/highmem.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_EFI
+void efi_init(void);
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
+#define efi_call_virt(f, ...)                                          \
+({                                                                     \
+       efi_##f##_t *__f;                                               \
+       efi_status_t __s;                                               \
+                                                                       \
+       efi_virtmap_load();                                             \
+       __f = efi.systab->runtime->f;                                   \
+       __s = __f(__VA_ARGS__);                                         \
+       efi_virtmap_unload();                                           \
+       __s;                                                            \
+})
+
+#define __efi_call_virt(f, ...)                                        \
+({                                                                     \
+       efi_##f##_t *__f;                                               \
+                                                                       \
+       efi_virtmap_load();                                             \
+       __f = efi.systab->runtime->f;                                   \
+       __f(__VA_ARGS__);                                               \
+       efi_virtmap_unload();                                           \
+})
+
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+       check_and_switch_context(mm, NULL);
+}
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
+#else
+#define efi_init()
+#endif /* CONFIG_EFI */
+
+/* arch specific definitions used by the stub code */
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
+/*
+ * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
+ * so we will reserve that amount of memory. We have no easy way to tell what
+ * the actual size of code + data the uncompressed kernel will use.
+ * If this is insufficient, the decompressor will relocate itself out of the
+ * way before performing the decompression.
+ */
+#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
+
+/*
+ * The kernel zImage should preferably be located between 32 MB and 128 MB
+ * from the base of DRAM. The min address leaves space for a maximal size
+ * uncompressed image, and the max address is due to how the zImage decompressor
+ * picks a destination address.
+ */
+#define ZIMAGE_OFFSET_LIMIT    SZ_128M
+#define MIN_ZIMAGE_OFFSET      MAX_UNCOMP_KERNEL_SIZE
+#define MAX_FDT_OFFSET         ZIMAGE_OFFSET_LIMIT
+
+#endif /* _ASM_ARM_EFI_H */
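Note: efi_call_virt() brackets every EFI runtime-service call with efi_virtmap_load()/efi_virtmap_unload(), so the firmware's mappings are live only for the duration of the call. A stripped-down sketch of that wrap-a-call pattern, using the GNU C statement expressions the kernel relies on; every efi_* name below is a stand-in, not the kernel's definition:

    #include <stdio.h>

    static void efi_virtmap_load(void)   { puts("install EFI runtime mapping"); }
    static void efi_virtmap_unload(void) { puts("remove EFI runtime mapping");  }

    /* pretend firmware service */
    static long get_time(void) { return 1458846822L; }

    #define efi_call_virt(f, ...)        \
    ({                                   \
            long __s;                    \
            efi_virtmap_load();          \
            __s = f(__VA_ARGS__);        \
            efi_virtmap_unload();        \
            __s;                         \
    })

    int main(void)
    {
        printf("time = %ld\n", efi_call_virt(get_time));
        return 0;
    }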
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 58cfe9f1a..5c17d2dec 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -19,20 +19,47 @@ enum fixed_addresses {
        FIX_TEXT_POKE0,
        FIX_TEXT_POKE1,

-       __end_of_fixed_addresses
+       __end_of_fixmap_region,
+
+       /*
+        * Share the kmap() region with early_ioremap(): this is guaranteed
+        * not to clash since early_ioremap() is only available before
+        * paging_init(), and kmap() only after.
+        */
+#define NR_FIX_BTMAPS          32
+#define FIX_BTMAPS_SLOTS       7
+#define TOTAL_FIX_BTMAPS       (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+       FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+       FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+       __end_of_early_ioremap_region
 };

+static const enum fixed_addresses __end_of_fixed_addresses =
+       __end_of_fixmap_region > __end_of_early_ioremap_region ?
+       __end_of_fixmap_region : __end_of_early_ioremap_region;
+
 #define FIXMAP_PAGE_COMMON     (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)

 #define FIXMAP_PAGE_NORMAL     (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+#define FIXMAP_PAGE_RO         (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)

 /* Used by set_fixmap_(io|nocache), both meant for mapping a device */
 #define FIXMAP_PAGE_IO         (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
 #define FIXMAP_PAGE_NOCACHE    FIXMAP_PAGE_IO

+#define __early_set_fixmap     __set_fixmap
+
+#ifdef CONFIG_MMU
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);

 void __init early_fixmap_init(void);

 #include <asm-generic/fixmap.h>

+#else
+
+static inline void early_fixmap_init(void) { }
+
+#endif
 #endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index fe3ea776d..3d7351c84 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>

-#define NR_IPI 8
+#define NR_IPI 7

 typedef struct {
        unsigned int __softirq_pending;
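Note: because the early_ioremap() slots overlay the kmap() slots, fixmap.h sizes the region with a compile-time maximum of two enum end markers via a static-const ternary. That trick works in standalone C too; a minimal sketch with made-up slot counts, not the kernel's values:

    #include <stdio.h>

    enum fixed_addresses {
        FIX_A, FIX_B,
        __end_of_fixmap_region,
        FIX_BTMAP_END = FIX_A,                 /* overlay: reuse slot 0   */
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + 224 - 1,
        __end_of_early_ioremap_region          /* = 224 in this sketch    */
    };

    /* constant expression: picks the larger of the two end markers */
    static const enum fixed_addresses __end_of_fixed_addresses =
        __end_of_fixmap_region > __end_of_early_ioremap_region ?
        __end_of_fixmap_region : __end_of_early_ioremap_region;

    int main(void)
    {
        printf("total fixmap slots: %d\n", (int)__end_of_fixed_addresses);
        return 0;
    }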
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index dc641ddf0..e22089fb4 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -19,6 +19,7 @@
 #ifndef __ARM_KVM_ARM_H__
 #define __ARM_KVM_ARM_H__

+#include <linux/const.h>
 #include <linux/types.h>

 /* Hyp Configuration Register (HCR) bits */
@@ -132,10 +133,9 @@
  * space.
  */
 #define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE  (1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD        (1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define KVM_PHYS_SIZE  (_AC(1, ULL) << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - _AC(1, ULL))
+#define PTRS_PER_S2_PGD        (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))

 /* Virtualization Translation Control Register (VTCR) bits */
 #define VTCR_SH0       (3 << 12)
@@ -162,17 +162,17 @@
 #define VTTBR_X                (5 - KVM_T0SZ)
 #endif
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT  (48LLU)
-#define VTTBR_VMID_MASK           (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  _AC(48, ULL)
+#define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)

 /* Hyp Syndrome Register (HSR) bits */
 #define HSR_EC_SHIFT   (26)
-#define HSR_EC         (0x3fU << HSR_EC_SHIFT)
-#define HSR_IL         (1U << 25)
+#define HSR_EC         (_AC(0x3f, UL) << HSR_EC_SHIFT)
+#define HSR_IL         (_AC(1, UL) << 25)
 #define HSR_ISS                (HSR_IL - 1)
 #define HSR_ISV_SHIFT  (24)
-#define HSR_ISV                (1U << HSR_ISV_SHIFT)
+#define HSR_ISV                (_AC(1, UL) << HSR_ISV_SHIFT)
 #define HSR_SRT_SHIFT  (16)
 #define HSR_SRT_MASK   (0xf << HSR_SRT_SHIFT)
 #define HSR_FSC                (0x3f)
@@ -180,9 +180,9 @@
 #define HSR_SSE                (1 << 21)
 #define HSR_WNR                (1 << 6)
 #define HSR_CV_SHIFT   (24)
-#define HSR_CV         (1U << HSR_CV_SHIFT)
+#define HSR_CV         (_AC(1, UL) << HSR_CV_SHIFT)
 #define HSR_COND_SHIFT (20)
-#define HSR_COND       (0xfU << HSR_COND_SHIFT)
+#define HSR_COND       (_AC(0xf, UL) << HSR_COND_SHIFT)

 #define FSC_FAULT      (0x04)
 #define FSC_ACCESS     (0x08)
@@ -210,13 +210,13 @@
 #define HSR_EC_DABT    (0x24)
 #define HSR_EC_DABT_HYP        (0x25)

-#define HSR_WFI_IS_WFE         (1U << 0)
+#define HSR_WFI_IS_WFE         (_AC(1, UL) << 0)

-#define HSR_HVC_IMM_MASK       ((1UL << 16) - 1)
+#define HSR_HVC_IMM_MASK       ((_AC(1, UL) << 16) - 1)

-#define HSR_DABT_S1PTW         (1U << 7)
-#define HSR_DABT_CM            (1U << 8)
-#define HSR_DABT_EA            (1U << 9)
+#define HSR_DABT_S1PTW         (_AC(1, UL) << 7)
+#define HSR_DABT_CM            (_AC(1, UL) << 8)
+#define HSR_DABT_EA            (_AC(1, UL) << 9)

 #define kvm_arm_exception_type \
        {0, "RESET" },          \
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 6692982c9..f9f27792d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,6 +150,12 @@ struct kvm_vcpu_stat {
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_wakeup;
+       u32 hvc_exit_stat;
+       u64 wfe_exit_stat;
+       u64 wfi_exit_stat;
+       u64 mmio_exit_user;
+       u64 mmio_exit_kernel;
+       u64 exits;
 };

 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 405aa1883..a520b7987 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -182,7 +182,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }

-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+                                              kvm_pfn_t pfn,
                                               unsigned long size,
                                               bool ipa_uncached)
 {
@@ -246,7 +247,7 @@ static inline void __kvm_flush_dcache_pte(pte_t pte)
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
 {
        unsigned long size = PMD_SIZE;
-       pfn_t pfn = pmd_pfn(pmd);
+       kvm_pfn_t pfn = pmd_pfn(pmd);

        while (size) {
                void *va = kmap_atomic_pfn(pfn);
@@ -279,6 +280,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
                                       pgd_t *merged_hyp_pgd,
                                       unsigned long hyp_idmap_start) { }

+static inline unsigned int kvm_get_vmid_bits(void)
+{
+       return 8;
+}
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index f98c7f32c..9b7c328fb 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -42,6 +42,8 @@ enum {
 extern void iotable_init(struct map_desc *, int);
 extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
                                  void *caller);
+extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+                               bool ng);

 #ifdef CONFIG_DEBUG_LL
 extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 9b32f76bb..432ce8176 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -26,7 +26,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID

 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)       ({ atomic64_set(&(mm)->context.id, 0); 0; })

 #ifdef CONFIG_ARM_ERRATA_798181
 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
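Note: the kvm_arm.h churn above is mostly mechanical: literals like 1U << 25 become _AC(1, UL) << 25 so the same header can be included from assembly, where type suffixes are syntax errors. <linux/const.h>'s _AC() is plain token pasting; a standalone re-implementation of the idea (the HSR_IL value is taken from the diff, the rest is a sketch):

    #include <stdio.h>

    /* _AC(): paste a type suffix in C, drop it under __ASSEMBLY__ */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define HSR_IL (_AC(1, UL) << 25)   /* now usable from both C and .S */

    int main(void)
    {
        printf("HSR_IL = %#lx\n", (unsigned long)HSR_IL);
        return 0;
    }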
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a745a2a53..dc46398bc 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -88,7 +88,6 @@
 #define L_PMD_SECT_VALID       (_AT(pmdval_t, 1) << 0)
 #define L_PMD_SECT_DIRTY       (_AT(pmdval_t, 1) << 55)
-#define L_PMD_SECT_SPLITTING   (_AT(pmdval_t, 1) << 56)
 #define L_PMD_SECT_NONE                (_AT(pmdval_t, 1) << 57)
 #define L_PMD_SECT_RDONLY      (_AT(pteval_t, 1) << 58)

@@ -232,13 +231,6 @@ static inline pte_t pte_mkspecial(pte_t pte)

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)    (pmd_val(pmd) && !pmd_table(pmd))
-#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-                         pmd_t *pmdp);
-#endif
 #endif

 #define PMD_BIT_FUNC(fn,op) \
@@ -246,9 +238,9 @@ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

 PMD_BIT_FUNC(wrprotect,        |= L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkold,    &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
 PMD_BIT_FUNC(mkwrite,  &= ~L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkdirty,  |= L_PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkclean,  &= ~L_PMD_SECT_DIRTY);
 PMD_BIT_FUNC(mkyoung,  |= PMD_SECT_AF);

 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index b4c6d9936..e1b825dfa 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,7 +14,7 @@
 #ifndef __ASM_ARM_PSCI_H
 #define __ASM_ARM_PSCI_H

-extern struct smp_operations psci_smp_ops;
+extern const struct smp_operations psci_smp_ops;

 #if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
 bool psci_smp_available(void);
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index e0adb9f1b..3613d7e9f 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -25,4 +25,10 @@ extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);

+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(const struct tag *tags);
+#else
+static inline void save_atags(const struct tag *tags) { }
+#endif
+
 #endif
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 712b50e0a..d769972db 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -35,6 +35,7 @@

 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
+#include <xen/interface/platform.h>

 long privcmd_call(unsigned call, unsigned long a1,
                unsigned long a2, unsigned long a3,
@@ -49,6 +50,12 @@ int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
 int HYPERVISOR_physdev_op(int cmd, void *arg);
 int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
 int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_platform_op_raw(void *arg);
+static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
+{
+       op->interface_version = XENPF_INTERFACE_VERSION;
+       return HYPERVISOR_platform_op_raw(op);
+}
 int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);

 static inline int
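Note: HYPERVISOR_platform_op() stamps interface_version before forwarding to the raw hypercall, so callers cannot forget the ABI handshake. A generic userspace sketch of that wrapper pattern; the struct layout is simplified and the version constant is an assumption modeled on Xen's platform.h, not copied from it:

    #include <stdio.h>

    #define XENPF_INTERFACE_VERSION 0x03000001  /* assumed ABI version */

    struct xen_platform_op {
        unsigned int cmd;
        unsigned int interface_version;
    };

    static int platform_op_raw(struct xen_platform_op *op)
    {
        /* stand-in for the real hypercall trap */
        printf("hypercall cmd=%u version=%#x\n", op->cmd, op->interface_version);
        return 0;
    }

    static int platform_op(struct xen_platform_op *op)
    {
        op->interface_version = XENPF_INTERFACE_VERSION; /* always pinned */
        return platform_op_raw(op);
    }

    int main(void)
    {
        struct xen_platform_op op = { .cmd = 17 };
        return platform_op(&op);
    }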
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 50066006e..75d596862 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -27,6 +27,8 @@
        (hnd).p = val;                                          \
 } while (0)

+#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
+
 #ifndef __ASSEMBLY__
 /* Explicitly size integers that represent pfns in the interface with
  * Xen so that we can have one ABI that works for 32 and 64 bit guests.
@@ -76,6 +78,7 @@ struct pvclock_wall_clock {
        u32 version;
        u32 sec;
        u32 nsec;
+       u32 sec_hi;
 } __attribute__((__packed__));
 #endif
diff --git a/arch/arm/include/debug/dc21285.S b/arch/arm/include/debug/dc21285.S
new file mode 100644
index 000000000..02247f313
--- /dev/null
+++ b/arch/arm/include/debug/dc21285.S
@@ -0,0 +1,42 @@
+/* arch/arm/mach-footbridge/include/mach/debug-macro.S
+ *
+ * Debugging macro include header
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <asm/hardware/dec21285.h>
+
+#include <mach/hardware.h>
+
+       /* For EBSA285 debugging */
+               .equ    dc21285_high, ARMCSR_BASE & 0xff000000
+               .equ    dc21285_low,  ARMCSR_BASE & 0x00ffffff
+
+               .macro  addruart, rp, rv, tmp
+               .if     dc21285_low
+               mov     \rp, #dc21285_low
+               .else
+               mov     \rp, #0
+               .endif
+               orr     \rv, \rp, #dc21285_high
+               orr     \rp, \rp, #0x42000000
+               .endm
+
+               .macro  senduart,rd,rx
+               str     \rd, [\rx, #0x160]      @ UARTDR
+               .endm
+
+               .macro  busyuart,rd,rx
+1001:          ldr     \rd, [\rx, #0x178]      @ UARTFLG
+               tst     \rd, #1 << 3
+               bne     1001b
+               .endm
+
+               .macro  waituart,rd,rx
+               .endm
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index ede692ffa..5dd2528e9 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -417,6 +417,7 @@
 #define __NR_userfaultfd               (__NR_SYSCALL_BASE+388)
 #define __NR_membarrier                        (__NR_SYSCALL_BASE+389)
 #define __NR_mlock2                    (__NR_SYSCALL_BASE+390)
+#define __NR_copy_file_range           (__NR_SYSCALL_BASE+391)

 /*
  * The following SWIs are ARM private.
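Note: the unistd.h hunk only wires up the syscall number; until the C library grows a copy_file_range() wrapper, the call can be made by number via syscall(2). A sketch for ARM EABI, where this patch assigns number 391 (file names are hypothetical, error handling kept minimal):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>

    int main(int argc, char **argv)
    {
        if (argc != 3) {
            fprintf(stderr, "usage: %s in out\n", argv[0]);
            return 1;
        }
        int in  = open(argv[1], O_RDONLY);
        int out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (in < 0 || out < 0)
            return 1;

        /* copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
         * 391 is the ARM EABI number added by this commit. */
        long n = syscall(391, in, NULL, out, NULL, 4096, 0);
        if (n < 0)
            perror("copy_file_range");
        else
            printf("copied %ld bytes\n", n);
        return 0;
    }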