Diffstat (limited to 'arch/arm/include')
31 files changed, 302 insertions, 163 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b2bc8e114..4eaea2173 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -480,13 +480,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 	.macro	uaccess_save, tmp
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	mrc	p15, 0, \tmp, c3, c0, 0
-	str	\tmp, [sp, #S_FRAME_SIZE]
+	str	\tmp, [sp, #SVC_DACR]
 #endif
 	.endm

 	.macro	uaccess_restore
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
-	ldr	r0, [sp, #S_FRAME_SIZE]
+	ldr	r0, [sp, #SVC_DACR]
 	mcr	p15, 0, r0, c3, c0, 0
 #endif
 	.endm
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9e10c4567..66d0e215a 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -77,8 +77,36 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 	return result; \
 }

+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+	unsigned long tmp; \
+	int result, val; \
+ \
+	prefetchw(&v->counter); \
+ \
+	__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
+"1:	ldrex	%0, [%4]\n" \
+"	" #asm_op "	%1, %0, %5\n" \
+"	strex	%2, %1, [%4]\n" \
+"	teq	%2, #0\n" \
+"	bne	1b" \
+	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+	: "r" (&v->counter), "Ir" (i) \
+	: "cc"); \
+ \
+	return result; \
+}
+
 #define atomic_add_return_relaxed	atomic_add_return_relaxed
 #define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+
+#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

 static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
@@ -159,6 +187,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return val; \
 }

+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	int val; \
+ \
+	raw_local_irq_save(flags); \
+	val = v->counter; \
+	v->counter c_op i; \
+	raw_local_irq_restore(flags); \
+ \
+	return val; \
+}
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -187,19 +229,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)

 #define ATOMIC_OPS(op, c_op, asm_op) \
 	ATOMIC_OP(op, c_op, asm_op) \
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op) \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)

 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)

 #define atomic_andnot atomic_andnot

-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, orr)
-ATOMIC_OP(xor, ^=, eor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+	ATOMIC_OP(op, c_op, asm_op) \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, orr)
+ATOMIC_OPS(xor, ^=, eor)

 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -317,24 +366,61 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
 	return result; \
 }

+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline long long \
+atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
+{ \
+	long long result, val; \
+	unsigned long tmp; \
+ \
+	prefetchw(&v->counter); \
+ \
+	__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
+"1:	ldrexd	%0, %H0, [%4]\n" \
+"	" #op1 " %Q1, %Q0, %Q5\n" \
+"	" #op2 " %R1, %R0, %R5\n" \
+"	strexd	%2, %1, %H1, [%4]\n" \
+"	teq	%2, #0\n" \
+"	bne	1b" \
+	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
+	: "r" (&v->counter), "r" (i) \
+	: "cc"); \
+ \
+	return result; \
+}
+
 #define ATOMIC64_OPS(op, op1, op2) \
 	ATOMIC64_OP(op, op1, op2) \
-	ATOMIC64_OP_RETURN(op, op1, op2)
+	ATOMIC64_OP_RETURN(op, op1, op2) \
+	ATOMIC64_FETCH_OP(op, op1, op2)

 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)

 #define atomic64_add_return_relaxed	atomic64_add_return_relaxed
 #define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2) \
+	ATOMIC64_OP(op, op1, op2) \
+	ATOMIC64_FETCH_OP(op, op1, op2)

 #define atomic64_andnot atomic64_andnot

-ATOMIC64_OP(and, and, and)
-ATOMIC64_OP(andnot, bic, bic)
-ATOMIC64_OP(or, orr, orr)
-ATOMIC64_OP(xor, eor, eor)
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, orr, orr)
+ATOMIC64_OPS(xor, eor, eor)
+
+#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
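The new fetch_* variants differ from the existing *_return ones only in which value they hand back: the value the counter held before the operation rather than after it. A minimal usage sketch (hypothetical caller, not part of this patch):

    #include <linux/atomic.h>

    static atomic_t pending = ATOMIC_INIT(0);

    static void fetch_vs_return_demo(void)
    {
            int before = atomic_fetch_add(1, &pending);  /* value before the add */
            int after  = atomic_add_return(1, &pending); /* value after the add */

            WARN_ON(after != before + 2); /* both calls added 1 */
    }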
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 112cc1a5d..f5d698182 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -44,9 +44,7 @@ extern void arm_heavy_mb(void);
 #define __arm_heavy_mb(x...) dsb(x)
 #endif

-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include <mach/barriers.h>
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()	__arm_heavy_mb()
 #define rmb()	dsb()
 #define wmb()	__arm_heavy_mb(st)
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index dff714d88..b1ce037e4 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -10,8 +10,8 @@
 #include <asm/param.h>	/* HZ */

 #define MAX_UDELAY_MS	2
-#define UDELAY_MULT	((UL(2199023) * HZ) >> 11)
-#define UDELAY_SHIFT	30
+#define UDELAY_MULT	UL(2147 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_SHIFT	31

 #ifndef __ASSEMBLY__

@@ -34,7 +34,7 @@ extern struct arm_delay_ops {
  * it, it means that you're calling udelay() with an out of range value.
  *
  * With currently imposed limits, this means that we support a max delay
- * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ * of 2000us. Further limits: HZ<=1000
  */
 extern void __bad_udelay(void);
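The rewritten UDELAY_MULT is easier to verify than it looks: the loop count is computed as (us * lpj * UDELAY_MULT) >> UDELAY_SHIFT, which wants UDELAY_MULT = HZ * 2^31 / 10^6, and 2^31 / 10^6 = 2147.483648, which the integer expression splits into 2147 * HZ + 483648 * HZ / 1000000. A quick userspace check of the arithmetic (assumes HZ = 100 for the printout):

    #include <stdio.h>

    #define HZ 100UL
    #define UDELAY_MULT (2147UL * HZ + 483648UL * HZ / 1000000UL)

    int main(void)
    {
            double exact = (double)HZ * 2147483648.0 / 1000000.0;

            /* prints 214748 vs. 214748.365: under 2 ppm of rounding error */
            printf("UDELAY_MULT = %lu (exact %.3f)\n", UDELAY_MULT, exact);
            return 0;
    }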
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a83570f10..bf02dbd9c 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@

 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>

 #include <asm/memory.h>
@@ -112,7 +111,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 /* The ARM override for dma_max_pfn() */
 static inline unsigned long dma_max_pfn(struct device *dev)
 {
-	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+	return dma_to_pfn(dev, *dev->dma_mask);
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)

@@ -174,7 +173,7 @@ static inline void dma_mark_clean(void *addr, size_t size) { }
  * to be the device-viewed address.
  */
 extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			   gfp_t gfp, struct dma_attrs *attrs);
+			   gfp_t gfp, unsigned long attrs);

 /**
  * arm_dma_free - free memory allocated by arm_dma_alloc
@@ -191,7 +190,7 @@ extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
  * during and after this call executing are illegal.
  */
 extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			 dma_addr_t handle, struct dma_attrs *attrs);
+			 dma_addr_t handle, unsigned long attrs);

 /**
  * arm_dma_mmap - map a coherent DMA allocation into user space
@@ -208,7 +207,7 @@ extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
  */
 extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			struct dma_attrs *attrs);
+			unsigned long attrs);

 /*
  * This can be called during early boot to increase the size of the atomic
@@ -262,16 +261,16 @@ extern void dmabounce_unregister_dev(struct device *);
  * The scatter list versions of the above methods.
  */
 extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction, struct dma_attrs *attrs);
+		enum dma_data_direction, unsigned long attrs);
 extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction, struct dma_attrs *attrs);
+		enum dma_data_direction, unsigned long attrs);
 extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
 extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
 extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		struct dma_attrs *attrs);
+		unsigned long attrs);

 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index a708fa1f0..766bf9b78 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -28,10 +28,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 #define arch_efi_call_virt_setup()	efi_virtmap_load()
 #define arch_efi_call_virt_teardown()	efi_virtmap_unload()

-#define arch_efi_call_virt(f, args...) \
+#define arch_efi_call_virt(p, f, args...) \
 ({ \
	efi_##f##_t *__f; \
-	__f = efi.systab->runtime->f; \
+	__f = p->f; \
	__f(args); \
 })
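The struct dma_attrs to unsigned long conversion (a tree-wide change in this cycle) turns DMA attributes into a plain bitmask of DMA_ATTR_* flags. A hedged before/after sketch at a hypothetical call site:

    #include <linux/dma-mapping.h>

    /* before: callers built up a struct dma_attrs
     *         DEFINE_DMA_ATTRS(attrs);
     *         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
     *         buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
     *
     * after: the attribute set is the unsigned long itself
     */
    static void *alloc_wc(struct device *dev, size_t size, dma_addr_t *handle)
    {
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
                                   DMA_ATTR_WRITE_COMBINE);
    }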
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index f4882553f..85a34cc83 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -17,7 +17,7 @@

 #define fd_outb(val,port) \
	do { \
-		if ((port) == FD_DOR) \
+		if ((port) == (u32)FD_DOR) \
			fd_setdor((val)); \
		else \
			outb((val),(port)); \
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 781ef5fe2..021692c64 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -282,7 +282,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * These perform PCI memory accesses via an ioremap region. They don't
  * take an address as such, but a cookie.
  *
- * Again, this are defined to perform little endian accesses. See the
+ * Again, these are defined to perform little endian accesses. See the
  * IO port primitives for more information.
  */
 #ifndef readl
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c2b9b4bde..1869af6ba 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -53,6 +53,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 /* Function pointer to optional machine-specific reinitialization */
 extern void (*kexec_reinit)(void);

+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+	return phys_to_idmap(phys);
+}
+#define phys_to_boot_phys phys_to_boot_phys
+
+static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
+{
+	return idmap_to_phys(entry);
+}
+#define boot_phys_to_phys boot_phys_to_phys
+
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+	return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
+}
+#define page_to_boot_pfn page_to_boot_pfn
+
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+	return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT));
+}
+#define boot_pfn_to_page boot_pfn_to_page
+
 #endif /* __ASSEMBLY__ */

 #endif /* CONFIG_KEXEC */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 3d5a5cd07..58faff5f1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,8 @@ extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

 extern void __init_stage2_translation(void);
+
+extern void __kvm_hyp_reset(unsigned long);
 #endif

 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 96387d477..de338d93d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -241,8 +241,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

-static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
-				       phys_addr_t pgd_ptr,
+static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
 {
@@ -251,18 +250,13 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
	 * code. The init code doesn't need to preserve these
	 * registers as r0-r3 are already callee saved according to
	 * the AAPCS.
-	 * Note that we slightly misuse the prototype by casing the
+	 * Note that we slightly misuse the prototype by casting the
	 * stack pointer to a void *.
-	 *
-	 * We don't have enough registers to perform the full init in
-	 * one go.  Install the boot PGD first, and then install the
-	 * runtime PGD, stack pointer and vectors. The PGDs are always
-	 * passed as the third argument, in order to be passed into
-	 * r2-r3 to the init code (yes, this is compliant with the
-	 * PCS!).
-	 */
-	kvm_call_hyp(NULL, 0, boot_pgd_ptr);
+	 * The PGDs are always passed as the third argument, in order
+	 * to be passed into r2-r3 to the init code (yes, this is
+	 * compliant with the PCS!).
+	 */
	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
 }

@@ -272,16 +266,13 @@ static inline void __cpu_init_stage2(void)
	kvm_call_hyp(__init_stage2_translation);
 }

-static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
					phys_addr_t phys_idmap_start)
 {
-	/*
-	 * TODO
-	 * kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
-	 */
+	kvm_call_hyp((void *)virt_to_idmap(__kvm_hyp_reset), vector_ptr);
 }

-static inline int kvm_arch_dev_ioctl_check_extension(long ext)
+static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 {
	return 0;
 }
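The kexec helpers above exist because some platforms (Keystone 2 being the motivating case) boot with RAM at a different physical alias than the one the running kernel uses; arch_phys_to_idmap_offset is the delta, and generic kexec code can now convert both ways. An illustrative invariant these definitions are meant to satisfy (the check itself is mine, not from the patch):

    static void check_boot_view_roundtrip(struct page *page)
    {
            unsigned long boot_pfn = page_to_boot_pfn(page);

            /* boot-view PFN and kernel-view page must map back to each other */
            BUG_ON(boot_pfn_to_page(boot_pfn) != page);
            BUG_ON(boot_phys_to_phys(phys_to_boot_phys(page_to_phys(page))) !=
                   page_to_phys(page));
    }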
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index f0e860761..6eaff28f2 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -25,9 +25,6 @@

 #define __hyp_text __section(.hyp.text) notrace

-#define kern_hyp_va(v) (v)
-#define hyp_kern_va(v) (v)
-
 #define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
 #define __ACCESS_CP15_64(Op1, CRm) \
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index f9a650611..3bb803d68 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -26,16 +26,7 @@
  * We directly use the kernel VA for the HYP, as we can directly share
  * the mapping (HTTBR "covers" TTBR1).
  */
-#define HYP_PAGE_OFFSET_MASK	UL(~0)
-#define HYP_PAGE_OFFSET		PAGE_OFFSET
-#define KERN_TO_HYP(kva)	(kva)
-
-/*
- * Our virtual mapping for the boot-time MMU-enable code. Must be
- * shared across all the page-tables. Conveniently, we use the vectors
- * page, where no kernel data will ever be shared with HYP.
- */
-#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)
+#define kern_hyp_va(kva)	(kva)

 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
@@ -49,9 +40,8 @@
 #include <asm/pgalloc.h>
 #include <asm/stage2_pgtable.h>

-int create_hyp_mappings(void *from, void *to);
+int create_hyp_mappings(void *from, void *to, pgprot_t prot);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);

 void stage2_unmap_vm(struct kvm *kvm);
@@ -65,7 +55,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

 phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
 phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
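Passing a pgprot_t into create_hyp_mappings() is what lets the HYP mappings be split by permission rather than mapped uniformly, matching the PAGE_HYP* additions in pgtable.h below. A sketch of the call sites this enables, modeled on the KVM init code from the same series (treat the exact symbols as assumptions):

            /* HYP text: executable but read-only */
            err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
                                      kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
            if (err)
                    goto out_err;

            /* rodata: readable, never executable */
            err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
                                      kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);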
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 0070e8520..2d88af5be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -22,6 +22,7 @@ struct hw_pci {
	struct msi_controller *msi_ctrl;
	struct pci_ops	*ops;
	int		nr_controllers;
+	unsigned int	io_optional:1;
	void		**private_data;
	int		(*setup)(int nr, struct pci_sys_data *);
	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 20febb368..b2902a5cd 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

 static inline void clean_pte_table(pte_t *pte)
 {
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index d0131ee6f..3f82e9da7 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -47,6 +47,7 @@
 #define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_NONSHARED_DEV	(PMD_SECT_TEX(2))

 /*
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index f8f1cff62..4cd664abf 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -62,6 +62,7 @@
 #define PMD_SECT_WT		(_AT(pmdval_t, 2) << 2)	/* normal inner write-through */
 #define PMD_SECT_WB		(_AT(pmdval_t, 3) << 2)	/* normal inner write-back */
 #define PMD_SECT_WBWA		(_AT(pmdval_t, 7) << 2)	/* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK	(_AT(pmdval_t, 7) << 2)

 /*
  * + Level 3 descriptor (PTE)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index d62204060..a8d656d9a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -97,7 +97,9 @@ extern pgprot_t	pgprot_s2_device;
 #define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
-#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
+#define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
+#define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
 #define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
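PMD_SECT_CACHE_MASK gives callers a single mask covering every cacheability bit of a section descriptor (TEX/C/B on 2-level, the attribute-index field on LPAE), so switching the cache policy of a section becomes mask-then-set. A hedged sketch of the pattern (illustrative, not a quote from the consuming code):

            pmdval_t pmd = pmd_val(*pmdp);

            pmd &= ~PMD_SECT_CACHE_MASK;    /* drop the old cache policy */
            pmd |= PMD_SECT_WBWA;           /* write-back, write-allocate */
            *pmdp = __pmd(pmd);
            flush_pmd_entry(pmdp);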
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 51622ba7c..e9c9a117b 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -13,10 +13,20 @@
 #include <uapi/asm/ptrace.h>

 #ifndef __ASSEMBLY__
+#include <linux/types.h>
+
 struct pt_regs {
	unsigned long uregs[18];
 };

+struct svc_pt_regs {
+	struct pt_regs regs;
+	u32 dacr;
+	u32 addr_limit;
+};
+
+#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+
 #define user_mode(regs)	\
	(((regs)->ARM_cpsr & 0xf) == 0)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 0fa418463..4bec45442 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -6,6 +6,8 @@
 #endif

 #include <linux/prefetch.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>

 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
@@ -50,8 +52,21 @@ static inline void dsb_sev(void)
  * memory.
  */

-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	u16 owner = READ_ONCE(lock->tickets.owner);
+
+	for (;;) {
+		arch_spinlock_t tmp = READ_ONCE(*lock);
+
+		if (tmp.tickets.owner == tmp.tickets.next ||
+		    tmp.tickets.owner != owner)
+			break;
+
+		wfe();
+	}
+	smp_acquire__after_ctrl_dep();
+}

 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3cadb726e..1e25cd805 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -209,17 +209,38 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
	tlb_flush(tlb);
 }

-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
+	if (tlb->nr == tlb->max)
+		return true;
	tlb->pages[tlb->nr++] = page;
-	VM_BUG_ON(tlb->nr > tlb->max);
-	return tlb->max - tlb->nr;
+	return false;
 }

 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (!__tlb_remove_page(tlb, page))
+	if (__tlb_remove_page(tlb, page)) {
		tlb_flush_mmu(tlb);
+		__tlb_remove_page(tlb, page);
+	}
+}
+
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
 }

 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
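Note the inverted return convention in tlb.h: __tlb_remove_page() now reports true when the batch is full and the page was not queued, and tlb_remove_page() itself shows the required caller pattern, flush then retry. Spelled out (this mirrors the code above; the comments are mine):

            if (__tlb_remove_page(tlb, page)) {
                    /* batch was full: the page was NOT queued */
                    tlb_flush_mmu(tlb);
                    /* the retry cannot fail immediately after a flush */
                    __tlb_remove_page(tlb, page);
            }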
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 35c9db857..a93c0f99a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -104,14 +104,6 @@ static inline void set_fs(mm_segment_t fs)

 #define segment_eq(a, b)	((a) == (b))

-#define __addr_ok(addr) ({ \
-	unsigned long flag; \
-	__asm__("cmp %2, %0; movlo %0, #0" \
-		: "=&r" (flag) \
-		: "0" (current_thread_info()->addr_limit), "r" (addr) \
-		: "cc"); \
-	(flag == 0); })
-
 /* We use 33-bit arithmetic here... */
 #define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
@@ -238,49 +230,23 @@ extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);

-#define __put_user_x(__r2, __p, __e, __l, __s) \
-	   __asm__ __volatile__ ( \
-		__asmeq("%0", "r0") __asmeq("%2", "r2") \
-		__asmeq("%3", "r1") \
-		"bl	__put_user_" #__s \
-		: "=&r" (__e) \
-		: "0" (__p), "r" (__r2), "r" (__l) \
-		: "ip", "lr", "cc")
-
-#define __put_user_check(x, p) \
+#define __put_user_check(__pu_val, __ptr, __err, __s) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
-		const typeof(*(p)) __user *__tmp_p = (p); \
-		register const typeof(*(p)) __r2 asm("r2") = (x); \
-		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+		register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
+		register const void __user *__p asm("r0") = __ptr; \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
-		unsigned int __ua_flags = uaccess_save_and_enable(); \
-		switch (sizeof(*(__p))) { \
-		case 1: \
-			__put_user_x(__r2, __p, __e, __l, 1); \
-			break; \
-		case 2: \
-			__put_user_x(__r2, __p, __e, __l, 2); \
-			break; \
-		case 4: \
-			__put_user_x(__r2, __p, __e, __l, 4); \
-			break; \
-		case 8: \
-			__put_user_x(__r2, __p, __e, __l, 8); \
-			break; \
-		default: __e = __put_user_bad(); break; \
-		} \
-		uaccess_restore(__ua_flags); \
-		__e; \
+		__asm__ __volatile__ ( \
+			__asmeq("%0", "r0") __asmeq("%2", "r2") \
+			__asmeq("%3", "r1") \
+			"bl	__put_user_" #__s \
+			: "=&r" (__e) \
+			: "0" (__p), "r" (__r2), "r" (__l) \
+			: "ip", "lr", "cc"); \
+		__err = __e; \
	})

-#define put_user(x, p) \
-	({ \
-		might_fault(); \
-		__put_user_check(x, p); \
-	 })
-
 #else /* CONFIG_MMU */

 /*
@@ -298,7 +264,7 @@ static inline void set_fs(mm_segment_t fs)
 }

 #define get_user(x, p)	__get_user(x, p)
-#define put_user(x, p)	__put_user(x, p)
+#define __put_user_check __put_user_nocheck

 #endif /* CONFIG_MMU */

@@ -389,36 +355,54 @@ do { \
 #define __get_user_asm_word(x, addr, err) \
	__get_user_asm(x, addr, err, ldr)

+
+#define __put_user_switch(x, ptr, __err, __fn) \
+	do { \
+		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+		__typeof__(*(ptr)) __pu_val = (x); \
+		unsigned int __ua_flags; \
+		might_fault(); \
+		__ua_flags = uaccess_save_and_enable(); \
+		switch (sizeof(*(ptr))) { \
+		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
+		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break; \
+		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break; \
+		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break; \
+		default: __err = __put_user_bad(); break; \
+		} \
+		uaccess_restore(__ua_flags); \
+	} while (0)
+
+#define put_user(x, ptr) \
+({ \
+	int __pu_err = 0; \
+	__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
+	__pu_err; \
+})
+
 #define __put_user(x, ptr) \
 ({ \
	long __pu_err = 0; \
-	__put_user_err((x), (ptr), __pu_err); \
+	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
	__pu_err; \
 })

 #define __put_user_error(x, ptr, err) \
 ({ \
-	__put_user_err((x), (ptr), err); \
+	__put_user_switch((x), (ptr), (err), __put_user_nocheck); \
	(void) 0; \
 })

-#define __put_user_err(x, ptr, err) \
-do { \
-	unsigned long __pu_addr = (unsigned long)(ptr); \
-	unsigned int __ua_flags; \
-	__typeof__(*(ptr)) __pu_val = (x); \
-	__chk_user_ptr(ptr); \
-	might_fault(); \
-	__ua_flags = uaccess_save_and_enable(); \
-	switch (sizeof(*(ptr))) { \
-	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break; \
-	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break; \
-	case 4: __put_user_asm_word(__pu_val, __pu_addr, err);	break; \
-	case 8: __put_user_asm_dword(__pu_val, __pu_addr, err);	break; \
-	default: __put_user_bad(); \
-	} \
-	uaccess_restore(__ua_flags); \
-} while (0)
+#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+	do { \
+		unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+		__put_user_nocheck_##__size(x, __pu_addr, __err); \
+	} while (0)
+
+#define __put_user_nocheck_1 __put_user_asm_byte
+#define __put_user_nocheck_2 __put_user_asm_half
+#define __put_user_nocheck_4 __put_user_asm_word
+#define __put_user_nocheck_8 __put_user_asm_dword

 #define __put_user_asm(x, __pu_addr, err, instr) \
	__asm__ __volatile__( \
@@ -496,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	unsigned int __ua_flags = uaccess_save_and_enable();
+	unsigned int __ua_flags;
+
+	check_object_size(to, n, false);
+	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
@@ -511,11 +498,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-	unsigned int __ua_flags = uaccess_save_and_enable();
+	unsigned int __ua_flags;
+
+	check_object_size(from, n, true);
+	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
 #else
+	check_object_size(from, n, true);
	return arm_copy_to_user(to, from, n);
 #endif
 }
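After this restructuring, put_user() and __put_user() share a single size dispatcher and differ only in the per-size helper passed to it. Hand-expanding put_user(v, p) for a 4-byte store gives roughly the following (a sketch, modulo the exact typeof plumbing):

            int __pu_err = 0;
            {
                    const int __user *__pu_ptr = p;
                    int __pu_val = v;
                    unsigned int __ua_flags;

                    might_fault();
                    __ua_flags = uaccess_save_and_enable();
                    /* sizeof(*p) == 4, so the switch selects: */
                    __put_user_check(__pu_val, __pu_ptr, __pu_err, 4); /* bl __put_user_4 */
                    uaccess_restore(__ua_flags);
            }
            /* __pu_err is the value of the put_user() expression */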
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index d4ceaf5f2..a2e75b84e 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,10 @@ static inline bool is_kernel_in_hyp_mode(void)
	return false;
 }

+/* The section containing the hypervisor idmap text */
+extern char __hyp_idmap_text_start[];
+extern char __hyp_idmap_text_end[];
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index b6b962d70..9d874db13 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -52,6 +52,7 @@ int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
 int HYPERVISOR_physdev_op(int cmd, void *arg);
 int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
 int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
 int HYPERVISOR_platform_op_raw(void *arg);
 static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
 {
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 9408a994c..95ce6ac3a 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -2,15 +2,14 @@
 #define _ASM_ARM_XEN_PAGE_COHERENT_H

 #include <asm/page.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>

 void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, struct dma_attrs *attrs);
+	     enum dma_data_direction dir, unsigned long attrs);
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs);
+		unsigned long attrs);
 void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

@@ -18,22 +17,20 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags,
-		struct dma_attrs *attrs)
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
 }

 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle,
-		struct dma_attrs *attrs)
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
 }

 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, struct dma_attrs *attrs)
+	     enum dma_data_direction dir, unsigned long attrs)
 {
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
@@ -58,8 +55,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 }

 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
	unsigned long pfn = PFN_DOWN(handle);
	/*
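HYPERVISOR_vm_assist is the hypercall a guest uses to enable or disable optional hypervisor behaviours. A sketch of the usual shape of a call (the VMASST_* constants live in xen/interface/xen.h; whether a particular type is available to ARM guests depends on the Xen version, so treat the constant below as an example rather than a guarantee):

    #include <xen/interface/xen.h>
    #include <asm/xen/hypercall.h>

    static int xen_enable_assist_example(void)
    {
            return HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                        VMASST_TYPE_runstate_update_flag);
    }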
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
new file mode 100644
index 000000000..ec154e719
--- /dev/null
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_XEN_OPS_H
+#define _ASM_XEN_OPS_H
+
+void xen_efi_runtime_setup(void);
+
+#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S
index d4ae3b8e2..0098401e5 100644
--- a/arch/arm/include/debug/at91.S
+++ b/arch/arm/include/debug/at91.S
@@ -9,14 +9,6 @@
 *
 */

-#ifdef CONFIG_MMU
-#define AT91_IO_P2V(x) ((x) - 0x01000000)
-#else
-#define AT91_IO_P2V(x) (x)
-#endif
-
-#define AT91_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS)
-
 #define AT91_DBGU_SR		(0x14)	/* Status Register */
 #define AT91_DBGU_THR		(0x1c)	/* Transmitter Holding Register */
 #define AT91_DBGU_TXRDY		(1 << 1)	/* Transmitter Ready */
@@ -24,7 +16,7 @@

	.macro	addruart, rp, rv, tmp
	ldr	\rp, =CONFIG_DEBUG_UART_PHYS	@ System peripherals (phys address)
-	ldr	\rv, =AT91_DEBUG_UART_VIRT	@ System peripherals (virt address)
+	ldr	\rv, =CONFIG_DEBUG_UART_VIRT	@ System peripherals (virt address)
	.endm

	.macro	senduart,rd,rx
diff --git a/arch/arm/include/debug/clps711x.S b/arch/arm/include/debug/clps711x.S
index abe225436..c17ac5c9e 100644
--- a/arch/arm/include/debug/clps711x.S
+++ b/arch/arm/include/debug/clps711x.S
@@ -9,10 +9,10 @@

 #ifndef CONFIG_DEBUG_CLPS711X_UART2
 #define CLPS711X_UART_PADDR	(0x80000000 + 0x0000)
-#define CLPS711X_UART_VADDR	(0xfeff0000 + 0x0000)
+#define CLPS711X_UART_VADDR	(0xfeff4000 + 0x0000)
 #else
 #define CLPS711X_UART_PADDR	(0x80000000 + 0x1000)
-#define CLPS711X_UART_VADDR	(0xfeff0000 + 0x1000)
+#define CLPS711X_UART_VADDR	(0xfeff4000 + 0x1000)
 #endif

 #define SYSFLG		(0x0140)
diff --git a/arch/arm/include/debug/exynos.S b/arch/arm/include/debug/exynos.S
index b17fdb7fb..60bf3c232 100644
--- a/arch/arm/include/debug/exynos.S
+++ b/arch/arm/include/debug/exynos.S
@@ -24,7 +24,11 @@
	mrc	p15, 0, \tmp, c0, c0, 0
	and	\tmp, \tmp, #0xf0
	teq	\tmp, #0xf0		@@ A15
-	ldreq	\rp, =EXYNOS5_PA_UART
+	beq	100f
+	mrc	p15, 0, \tmp, c0, c0, 5
+	and	\tmp, \tmp, #0xf00
+	teq	\tmp, #0x100		@@ A15 + A7 but boot to A7
+100:	ldreq	\rp, =EXYNOS5_PA_UART
	movne	\rp, #EXYNOS4_PA_UART	@@ EXYNOS4
	ldr	\rv, =S3C_VA_UART
 #if CONFIG_DEBUG_S3C_UART != 0
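The extra exynos addruart test reads MPIDR so that a big.LITTLE Exynos5 that booted on the A7 cluster still picks the Exynos5 UART. In C the logic would read roughly as follows (illustrative only; the real code must be position-independent assembly running before the MMU is up):

    #include <asm/cputype.h>

    static bool pick_exynos5_uart(void)
    {
            if ((read_cpuid_id() & 0xf0) == 0xf0)   /* MIDR: Cortex-A15 */
                    return true;
            /* MPIDR Aff1 == 1: A15 + A7 chip that booted on the A7 cluster */
            return (read_cpuid_mpidr() & 0xf00) == 0x100;
    }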
diff --git a/arch/arm/include/debug/samsung.S b/arch/arm/include/debug/samsung.S
index 8d8d922e5..f4eeed2a1 100644
--- a/arch/arm/include/debug/samsung.S
+++ b/arch/arm/include/debug/samsung.S
@@ -15,11 +15,13 @@

	.macro fifo_level_s5pv210 rd, rx
		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
		and	\rd, \rd, #S5PV210_UFSTAT_TXMASK
	.endm

	.macro fifo_full_s5pv210 rd, rx
		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
		tst	\rd, #S5PV210_UFSTAT_TXFULL
	.endm

@@ -28,6 +30,7 @@

	.macro fifo_level_s3c2440 rd, rx
		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
		and	\rd, \rd, #S3C2440_UFSTAT_TXMASK
	.endm

@@ -37,6 +40,7 @@

	.macro fifo_full_s3c2440 rd, rx
		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
		tst	\rd, #S3C2440_UFSTAT_TXFULL
	.endm

@@ -50,6 +54,7 @@

	.macro	busyuart, rd, rx
		ldr	\rd, [\rx, # S3C2410_UFCON]
+ARM_BE8(rev \rd, \rd)
		tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
		beq	1001f
		@
		@ FIFO enabled...
@@ -61,6 +66,7 @@

1001:		@ busy waiting for non fifo
		ldr	\rd, [\rx, # S3C2410_UTRSTAT]
+ARM_BE8(rev \rd, \rd)
		tst	\rd, #S3C2410_UTRSTAT_TXFE
		beq	1001b

@@ -69,6 +75,7 @@

	.macro	waituart,rd,rx
		ldr	\rd, [\rx, # S3C2410_UFCON]
+ARM_BE8(rev \rd, \rd)
		tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
		beq	1001f
		@
		@ FIFO enabled...
@@ -80,6 +87,7 @@

1001:		@ idle waiting for non fifo
		ldr	\rd, [\rx, # S3C2410_UTRSTAT]
+ARM_BE8(rev \rd, \rd)
		tst	\rd, #S3C2410_UTRSTAT_TXFE
		beq	1001b

diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index df3f60cb1..a2b3eb313 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -139,8 +139,8 @@ struct kvm_arch_memory_slot {
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)

 #define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT	ARM_CP15_REG64(1, 14)
-#define KVM_REG_ARM_TIMER_CVAL	ARM_CP15_REG64(3, 14)
+#define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14)

 /* Normal registers are mapped as coprocessor 16. */
 #define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
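The ARM_BE8(rev ...) fixups exist because a raw ldr of a little-endian device register comes back byte-swapped on a BE8 (big-endian) kernel. The C-level equivalent is simply to use an endian-aware accessor; an illustrative fragment:

    static u32 uart_tx_level(void __iomem *regs)
    {
            /* readl() is defined as a little-endian access and byte-swaps on
             * big-endian kernels: the C analogue of ldr + ARM_BE8(rev). */
            return readl(regs + S3C2410_UFSTAT) & S5PV210_UFSTAT_TXMASK;
    }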