From 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Tue, 15 Dec 2015 14:52:16 -0300
Subject: Linux-libre 4.3.2-gnu

---
 arch/arm64/mm/cache.S       |  7 ++++++-
 arch/arm64/mm/context.c     | 16 ----------------
 arch/arm64/mm/dma-mapping.c | 35 +++++++++++++++++++++--------------
 arch/arm64/mm/fault.c       | 28 ++++++++++++++++++++++++++--
 arch/arm64/mm/flush.c       |  4 ----
 arch/arm64/mm/hugetlbpage.c |  4 ----
 arch/arm64/mm/init.c        |  4 ++--
 arch/arm64/mm/mmu.c         | 13 +------------
 arch/arm64/mm/proc.S        | 21 +++++++++++++++------
 9 files changed, 71 insertions(+), 61 deletions(-)

(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c..eb48d5df4 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
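A note on the cache.S change above: alternative_insn takes whole instruction strings and gets unwieldy for multi-instruction sequences, so the workaround is now written with the block-style alternative_if_not / alternative_else / alternative_endif macros. Both sequences are assembled into the image, and the boot-time alternatives patcher leaves "dc cvac" in place or substitutes "dc civac" on cores that report the ARM64_WORKAROUND_CLEAN_CACHE erratum. As a rough C analogy of that one-shot selection (the helper names below are hypothetical; the real kernel rewrites the instruction stream in place rather than dispatching through a pointer):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two cache-maintenance instructions. */
static void dc_cvac(void *line)  { printf("dc cvac, %p\n", line); }  /* clean to PoC */
static void dc_civac(void *line) { printf("dc civac, %p\n", line); } /* clean+invalidate to PoC */

/* "Patched" once at boot, then used unconditionally on the hot path. */
static void (*dma_clean_line)(void *) = dc_cvac;

static void apply_cache_alternative(bool workaround_clean_cache)
{
	/* On affected cores "dc cvac" may fail to write dirty data back
	 * to memory, so the clean+invalidate form is used instead. */
	if (workaround_clean_cache)
		dma_clean_line = dc_civac;
}

The gain from the block form is purely readability: each alternative is one instruction per line instead of a quoted string argument.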
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6cd3..d70ff14db 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@ static void flush_context(void)
 	__flush_icache_all();
 }
 
-#ifdef CONFIG_SMP
-
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 {
 	unsigned long flags;
@@ -110,23 +108,12 @@ static void reset_context(void *info)
 	cpu_switch_mm(mm->pgd, mm);
 }
 
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 	unsigned int bits = asid_bits();
 
 	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from another
 	 * CPU before we acquired the lock.
@@ -136,7 +123,6 @@ void __new_context(struct mm_struct *mm)
 		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
-#endif
 	/*
 	 * At this point, it is guaranteed that the current mm (with an old
 	 * ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@ void __new_context(struct mm_struct *mm)
 		cpu_last_asid = ASID_FIRST_VERSION;
 	asid = cpu_last_asid + smp_processor_id();
 	flush_context();
-#ifdef CONFIG_SMP
 	smp_wmb();
 	smp_call_function(reset_context, NULL, 1);
-#endif
 	cpu_last_asid += NR_CPUS - 1;
 }
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d16a1cead..99224dceb 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
-	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+	if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
 		struct page *page;
 		void *addr;
 
@@ -144,6 +144,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	struct page *page;
 	void *ptr, *coherent_ptr;
 	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	size = PAGE_ALIGN(size);
 
@@ -171,9 +172,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
 	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-						   __get_dma_pgprot(attrs,
-							__pgprot(PROT_NORMAL_NC), false),
-						   NULL);
+						   prot, NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -303,9 +302,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 			       sg->length, dir);
 }
 
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +314,9 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
@@ -327,20 +330,24 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  struct dma_attrs *attrs)
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 struct dma_attrs *attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+	if (!ret)
+		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+			    PAGE_ALIGN(size), 0);
+
+	return ret;
 }
 
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
+	.get_sgtable = __swiotlb_get_sgtable,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
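A note on the dma-mapping.c change above: besides folding the vm_page_prot fixup into __swiotlb_mmap itself, the patch wires up a .get_sgtable method, which the generic dma_get_sgtable() wrapper (and thus dma-buf exporters) can now use for swiotlb coherent allocations. A hedged driver-side sketch, assuming a device pointer and buffer size supplied elsewhere (export_coherent_buffer is an illustrative name, error handling abbreviated):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int export_coherent_buffer(struct device *dev, size_t size,
				  struct sg_table *sgt)
{
	dma_addr_t handle;
	void *cpu_addr;
	int ret;

	cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* Ends up in __swiotlb_get_sgtable(): a single-entry table
	 * covering the physically contiguous allocation. */
	ret = dma_get_sgtable(dev, sgt, cpu_addr, handle, size);
	if (ret)
		dma_free_coherent(dev, size, cpu_addr, handle);
	return ret;
}

The caller owns the table afterwards and releases it with sg_free_table() once the buffer is no longer shared.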
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 27c3e6fd2..9fadf6d70 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -223,6 +225,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
+	/*
+	 * PAN bit set implies the fault happened in kernel space, but not
+	 * in the arch's user access functions.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+		goto no_context;
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -493,14 +502,22 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
 }
 
-static struct fault_info debug_fault_info[] = {
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		       struct pt_regs *regs);
+
+/*
+ * __refdata because early_brk64 is __init, but the reference to it is
+ * clobbered at arch_initcall time.
+ * See traps.c and debug-monitors.c:debug_traps_init().
+ */
+static struct fault_info __refdata debug_fault_info[] = {
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
 	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
 	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
 	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
-	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
+	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
 	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
 };
 
@@ -537,3 +554,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 
 	return 0;
 }
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
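A note on the fault.c change above: with CONFIG_ARM64_PAN, PSTATE.PAN is set whenever the kernel runs, and the uaccess routines clear it only around intentional user accesses. A data abort taken from EL1 with PAN still set therefore cannot be a legitimate kernel access to user memory, so do_page_fault() now goes straight to no_context (the kernel-fault path) instead of walking the VMAs. The same predicate as a standalone C sketch (fault_is_kernel_bug is an illustrative name; the bit position matches the architectural PSTATE.PAN definition):

#include <stdbool.h>

#define PSR_PAN_BIT	(1UL << 22)	/* PSTATE.PAN in the saved pstate */

/*
 * True when the fault must be treated as a kernel bug: PAN was still
 * set at the time of the fault, so this was not one of the
 * (PAN-clearing) uaccess routines touching user memory on purpose.
 */
static bool fault_is_kernel_bug(unsigned long pstate, bool pan_enabled)
{
	return pan_enabled && (pstate & PSR_PAN_BIT);
}

cpu_enable_pan() at the bottom of the file clears SCTLR_EL1.SPAN so that exception entry sets PAN automatically; the cpufeature code invokes it on each CPU once the capability is detected.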
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 4dfa3975c..c26b80401 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -60,14 +60,10 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long uaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-#ifdef CONFIG_SMP
 	preempt_disable();
-#endif
 	memcpy(dst, src, len);
 	flush_ptrace_access(vma, page, uaddr, dst, len);
-#ifdef CONFIG_SMP
 	preempt_enable();
-#endif
 }
 
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 831ec534d..383b03ff3 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
 #include <linux/init.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad87ce826..f5c0680d1 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -358,9 +358,9 @@ void free_initmem(void)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-static int keep_initrd;
+static int keep_initrd __initdata;
 
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
 		free_reserved_area((void *)start, (void *)end, 0, "initrd");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a4ede4e2d..9211b8527 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -267,7 +267,7 @@ static void *late_alloc(unsigned long size)
 	return ptr;
 }
 
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -460,17 +460,6 @@ void __init paging_init(void)
 	cpu_set_default_tcr_t0sz();
 }
 
-/*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	cpu_set_idmap_tcr_t0sz();
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
 /*
  * Check whether a kernel address is valid (derived from arch/x86/).
  */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 39139a3aa..e4ee7bd88 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -34,11 +34,7 @@
 #define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
 #endif
 
-#ifdef CONFIG_SMP
 #define TCR_SMP_FLAGS	TCR_SHARED
-#else
-#define TCR_SMP_FLAGS	0
-#endif
 
 /* PTWs cacheable, inner/outer WBWA */
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -150,13 +146,13 @@ ENDPROC(cpu_do_switch_mm)
  *	value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-	ic	iallu				// I+BTB cache invalidate
 	tlbi	vmalle1is			// invalidate I + D TLBs
 	dsb	ish
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	msr	mdscr_el1, xzr			// Reset mdscr_el1
+	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x0			// access to the DCC from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
@@ -196,6 +192,19 @@ ENTRY(__cpu_setup)
 	 */
 	mrs	x9, ID_AA64MMFR0_EL1
 	bfi	x10, x9, #32, #3
+#ifdef CONFIG_ARM64_HW_AFDBM
+	/*
+	 * Hardware update of the Access and Dirty bits.
+	 */
+	mrs	x9, ID_AA64MMFR1_EL1
+	and	x9, x9, #0xf
+	cbz	x9, 2f
+	cmp	x9, #2
+	b.lt	1f
+	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
+1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
+2:
+#endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
-- 
cgit v1.2.3-54-g00ecf
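A note on the proc.S change above: the ID_AA64MMFR1_EL1.HAFDBS field (bits [3:0]) advertises hardware page-table flag updates — 0 means none, 1 means hardware Access flag updates, 2 means Access and Dirty. The assembly sets TCR_EL1.HA for any non-zero value and TCR_EL1.HD only when the field is 2 or greater. The same decode as a C sketch (tcr_afdbm_flags and its parameter are illustrative; the TCR bit positions match the architecture):

#include <stdint.h>

#define TCR_HA	(UINT64_C(1) << 39)	/* hardware Access flag update */
#define TCR_HD	(UINT64_C(1) << 40)	/* hardware Dirty flag update */

static uint64_t tcr_afdbm_flags(uint64_t id_aa64mmfr1)
{
	uint64_t hafdbs = id_aa64mmfr1 & 0xf;	/* HAFDBS, bits [3:0] */
	uint64_t tcr = 0;

	if (hafdbs == 0)	/* no hardware AF/DBM: leave both bits clear */
		return 0;
	if (hafdbs >= 2)	/* dirty-state tracking also implemented */
		tcr |= TCR_HD;
	tcr |= TCR_HA;		/* access-flag updates for any non-zero value */
	return tcr;
}

The same hunk also drops the I-cache invalidate ("ic iallu") from per-CPU setup and, instead of zeroing MDSCR_EL1, sets its TDCC bit (bit 12) so that EL0 can no longer touch the Debug Communications Channel.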