author     André Fabian Silva Delgado <emulatorman@parabola.nu>   2015-08-05 17:04:01 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>   2015-08-05 17:04:01 -0300
commit     57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree       5e910f0e82173f4ef4f51111366a3f1299037a7b /arch/arm64/mm
Initial import
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/Makefile       |   6
-rw-r--r--  arch/arm64/mm/cache.S        | 268
-rw-r--r--  arch/arm64/mm/context.c      | 167
-rw-r--r--  arch/arm64/mm/copypage.c     |  36
-rw-r--r--  arch/arm64/mm/dma-mapping.c  | 436
-rw-r--r--  arch/arm64/mm/dump.c         | 342
-rw-r--r--  arch/arm64/mm/extable.c      |  17
-rw-r--r--  arch/arm64/mm/fault.c        | 532
-rw-r--r--  arch/arm64/mm/flush.c        | 122
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  |  68
-rw-r--r--  arch/arm64/mm/init.c         | 376
-rw-r--r--  arch/arm64/mm/ioremap.c      | 113
-rw-r--r--  arch/arm64/mm/mm.h           |   3
-rw-r--r--  arch/arm64/mm/mmap.c         | 138
-rw-r--r--  arch/arm64/mm/mmu.c          | 645
-rw-r--r--  arch/arm64/mm/pageattr.c     |  98
-rw-r--r--  arch/arm64/mm/pgd.c          |  61
-rw-r--r--  arch/arm64/mm/proc-macros.S  |  64
-rw-r--r--  arch/arm64/mm/proc.S         | 264
19 files changed, 3756 insertions, 0 deletions
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile new file mode 100644 index 000000000..773d37a14 --- /dev/null +++ b/arch/arm64/mm/Makefile @@ -0,0 +1,6 @@ +obj-y := dma-mapping.o extable.o fault.o init.o \ + cache.o copypage.o flush.o \ + ioremap.o mmap.o pgd.o mmu.o \ + context.o proc.o pageattr.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +obj-$(CONFIG_ARM64_PTDUMP) += dump.o diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S new file mode 100644 index 000000000..2560e1e15 --- /dev/null +++ b/arch/arm64/mm/cache.S @@ -0,0 +1,268 @@ +/* + * Cache maintenance + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/errno.h> +#include <linux/linkage.h> +#include <linux/init.h> +#include <asm/assembler.h> +#include <asm/cpufeature.h> +#include <asm/alternative-asm.h> + +#include "proc-macros.S" + +/* + * __flush_dcache_all() + * + * Flush the whole D-cache. + * + * Corrupted registers: x0-x7, x9-x11 + */ +__flush_dcache_all: + dmb sy // ensure ordering with previous memory accesses + mrs x0, clidr_el1 // read clidr + and x3, x0, #0x7000000 // extract loc from clidr + lsr x3, x3, #23 // left align loc bit field + cbz x3, finished // if loc is 0, then no need to clean + mov x10, #0 // start clean at cache level 0 +loop1: + add x2, x10, x10, lsr #1 // work out 3x current cache level + lsr x1, x0, x2 // extract cache type bits from clidr + and x1, x1, #7 // mask of the bits for current cache only + cmp x1, #2 // see what cache we have at this level + b.lt skip // skip if no cache, or just i-cache + save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic + msr csselr_el1, x10 // select current cache level in csselr + isb // isb to sych the new cssr&csidr + mrs x1, ccsidr_el1 // read the new ccsidr + restore_irqs x9 + and x2, x1, #7 // extract the length of the cache lines + add x2, x2, #4 // add 4 (line length offset) + mov x4, #0x3ff + and x4, x4, x1, lsr #3 // find maximum number on the way size + clz w5, w4 // find bit position of way size increment + mov x7, #0x7fff + and x7, x7, x1, lsr #13 // extract max number of the index size +loop2: + mov x9, x4 // create working copy of max way size +loop3: + lsl x6, x9, x5 + orr x11, x10, x6 // factor way and cache number into x11 + lsl x6, x7, x2 + orr x11, x11, x6 // factor index number into x11 + dc cisw, x11 // clean & invalidate by set/way + subs x9, x9, #1 // decrement the way + b.ge loop3 + subs x7, x7, #1 // decrement the index + b.ge loop2 +skip: + add x10, x10, #2 // increment cache number + cmp x3, x10 + b.gt loop1 +finished: + mov x10, #0 // swith back to cache level 0 + msr csselr_el1, x10 // select current cache level in csselr + dsb sy + isb + ret +ENDPROC(__flush_dcache_all) + +/* + * flush_cache_all() + * + * Flush the entire cache system. The data cache flush is now achieved + * using atomic clean / invalidates working outwards from L1 cache. 
This + * is done using Set/Way based cache maintainance instructions. The + * instruction cache can still be invalidated back to the point of + * unification in a single instruction. + */ +ENTRY(flush_cache_all) + mov x12, lr + bl __flush_dcache_all + mov x0, #0 + ic ialluis // I+BTB cache invalidate + ret x12 +ENDPROC(flush_cache_all) + +/* + * flush_icache_range(start,end) + * + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * - start - virtual start address of region + * - end - virtual end address of region + */ +ENTRY(flush_icache_range) + /* FALLTHROUGH */ + +/* + * __flush_cache_user_range(start,end) + * + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * - start - virtual start address of region + * - end - virtual end address of region + */ +ENTRY(__flush_cache_user_range) + dcache_line_size x2, x3 + sub x3, x2, #1 + bic x4, x0, x3 +1: +USER(9f, dc cvau, x4 ) // clean D line to PoU + add x4, x4, x2 + cmp x4, x1 + b.lo 1b + dsb ish + + icache_line_size x2, x3 + sub x3, x2, #1 + bic x4, x0, x3 +1: +USER(9f, ic ivau, x4 ) // invalidate I line PoU + add x4, x4, x2 + cmp x4, x1 + b.lo 1b + dsb ish + isb + mov x0, #0 + ret +9: + mov x0, #-EFAULT + ret +ENDPROC(flush_icache_range) +ENDPROC(__flush_cache_user_range) + +/* + * __flush_dcache_area(kaddr, size) + * + * Ensure that the data held in the page kaddr is written back to the + * page in question. + * + * - kaddr - kernel address + * - size - size in question + */ +ENTRY(__flush_dcache_area) + dcache_line_size x2, x3 + add x1, x0, x1 + sub x3, x2, #1 + bic x0, x0, x3 +1: dc civac, x0 // clean & invalidate D line / unified line + add x0, x0, x2 + cmp x0, x1 + b.lo 1b + dsb sy + ret +ENDPROC(__flush_dcache_area) + +/* + * __inval_cache_range(start, end) + * - start - start address of region + * - end - end address of region + */ +ENTRY(__inval_cache_range) + /* FALLTHROUGH */ + +/* + * __dma_inv_range(start, end) + * - start - virtual start address of region + * - end - virtual end address of region + */ +__dma_inv_range: + dcache_line_size x2, x3 + sub x3, x2, #1 + tst x1, x3 // end cache line aligned? + bic x1, x1, x3 + b.eq 1f + dc civac, x1 // clean & invalidate D / U line +1: tst x0, x3 // start cache line aligned? 
+ bic x0, x0, x3 + b.eq 2f + dc civac, x0 // clean & invalidate D / U line + b 3f +2: dc ivac, x0 // invalidate D / U line +3: add x0, x0, x2 + cmp x0, x1 + b.lo 2b + dsb sy + ret +ENDPROC(__inval_cache_range) +ENDPROC(__dma_inv_range) + +/* + * __dma_clean_range(start, end) + * - start - virtual start address of region + * - end - virtual end address of region + */ +__dma_clean_range: + dcache_line_size x2, x3 + sub x3, x2, #1 + bic x0, x0, x3 +1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE + add x0, x0, x2 + cmp x0, x1 + b.lo 1b + dsb sy + ret +ENDPROC(__dma_clean_range) + +/* + * __dma_flush_range(start, end) + * - start - virtual start address of region + * - end - virtual end address of region + */ +ENTRY(__dma_flush_range) + dcache_line_size x2, x3 + sub x3, x2, #1 + bic x0, x0, x3 +1: dc civac, x0 // clean & invalidate D / U line + add x0, x0, x2 + cmp x0, x1 + b.lo 1b + dsb sy + ret +ENDPROC(__dma_flush_range) + +/* + * __dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(__dma_map_area) + add x1, x1, x0 + cmp w2, #DMA_FROM_DEVICE + b.eq __dma_inv_range + b __dma_clean_range +ENDPROC(__dma_map_area) + +/* + * __dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(__dma_unmap_area) + add x1, x1, x0 + cmp w2, #DMA_TO_DEVICE + b.ne __dma_inv_range + ret +ENDPROC(__dma_unmap_area) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c new file mode 100644 index 000000000..76c1e6cd3 --- /dev/null +++ b/arch/arm64/mm/context.c @@ -0,0 +1,167 @@ +/* + * Based on arch/arm/mm/context.c + * + * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/percpu.h> + +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> +#include <asm/cachetype.h> + +#define asid_bits(reg) \ + (((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8) + +#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS) + +static DEFINE_RAW_SPINLOCK(cpu_asid_lock); +unsigned int cpu_last_asid = ASID_FIRST_VERSION; + +/* + * We fork()ed a process, and we need a new context for the child to run in. 
+ */ +void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + mm->context.id = 0; + raw_spin_lock_init(&mm->context.id_lock); +} + +static void flush_context(void) +{ + /* set the reserved TTBR0 before flushing the TLB */ + cpu_set_reserved_ttbr0(); + flush_tlb_all(); + if (icache_is_aivivt()) + __flush_icache_all(); +} + +#ifdef CONFIG_SMP + +static void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + unsigned long flags; + + /* + * Locking needed for multi-threaded applications where the same + * mm->context.id could be set from different CPUs during the + * broadcast. This function is also called via IPI so the + * mm->context.id_lock has to be IRQ-safe. + */ + raw_spin_lock_irqsave(&mm->context.id_lock, flags); + if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { + /* + * Old version of ASID found. Set the new one and reset + * mm_cpumask(mm). + */ + mm->context.id = asid; + cpumask_clear(mm_cpumask(mm)); + } + raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); + + /* + * Set the mm_cpumask(mm) bit for the current CPU. + */ + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); +} + +/* + * Reset the ASID on the current CPU. This function call is broadcast from the + * CPU handling the ASID rollover and holding cpu_asid_lock. + */ +static void reset_context(void *info) +{ + unsigned int asid; + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = current->active_mm; + + /* + * current->active_mm could be init_mm for the idle thread immediately + * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to + * the reserved value, so no need to reset any context. + */ + if (mm == &init_mm) + return; + + smp_rmb(); + asid = cpu_last_asid + cpu; + + flush_context(); + set_mm_context(mm, asid); + + /* set the new ASID */ + cpu_switch_mm(mm->pgd, mm); +} + +#else + +static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + mm->context.id = asid; + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); +} + +#endif + +void __new_context(struct mm_struct *mm) +{ + unsigned int asid; + unsigned int bits = asid_bits(); + + raw_spin_lock(&cpu_asid_lock); +#ifdef CONFIG_SMP + /* + * Check the ASID again, in case the change was broadcast from another + * CPU before we acquired the lock. + */ + if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + raw_spin_unlock(&cpu_asid_lock); + return; + } +#endif + /* + * At this point, it is guaranteed that the current mm (with an old + * ASID) isn't active on any other CPU since the ASIDs are changed + * simultaneously via IPI. + */ + asid = ++cpu_last_asid; + + /* + * If we've used up all our ASIDs, we need to start a new version and + * flush the TLB. + */ + if (unlikely((asid & ((1 << bits) - 1)) == 0)) { + /* increment the ASID version */ + cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits); + if (cpu_last_asid == 0) + cpu_last_asid = ASID_FIRST_VERSION; + asid = cpu_last_asid + smp_processor_id(); + flush_context(); +#ifdef CONFIG_SMP + smp_wmb(); + smp_call_function(reset_context, NULL, 1); +#endif + cpu_last_asid += NR_CPUS - 1; + } + + set_mm_context(mm, asid); + raw_spin_unlock(&cpu_asid_lock); +} diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c new file mode 100644 index 000000000..13bbc3be6 --- /dev/null +++ b/arch/arm64/mm/copypage.c @@ -0,0 +1,36 @@ +/* + * Based on arch/arm/mm/copypage.c + * + * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved. 
+ * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/mm.h> + +#include <asm/page.h> +#include <asm/cacheflush.h> + +void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + copy_page(kto, kfrom); + __flush_dcache_area(kto, PAGE_SIZE); +} +EXPORT_SYMBOL_GPL(__cpu_copy_user_page); + +void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) +{ + clear_page(kaddr); +} +EXPORT_SYMBOL_GPL(__cpu_clear_user_page); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c new file mode 100644 index 000000000..b0bd4e5fd --- /dev/null +++ b/arch/arm64/mm/dma-mapping.c @@ -0,0 +1,436 @@ +/* + * SWIOTLB-based DMA API implementation + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/gfp.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/genalloc.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/vmalloc.h> +#include <linux/swiotlb.h> + +#include <asm/cacheflush.h> + +struct dma_map_ops *dma_ops; +EXPORT_SYMBOL(dma_ops); + +static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, + bool coherent) +{ + if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) + return pgprot_writecombine(prot); + return prot; +} + +static struct gen_pool *atomic_pool; + +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K +static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE; + +static int __init early_coherent_pool(char *p) +{ + atomic_pool_size = memparse(p, &p); + return 0; +} +early_param("coherent_pool", early_coherent_pool); + +static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) +{ + unsigned long val; + void *ptr = NULL; + + if (!atomic_pool) { + WARN(1, "coherent pool not initialised!\n"); + return NULL; + } + + val = gen_pool_alloc(atomic_pool, size); + if (val) { + phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); + + *ret_page = phys_to_page(phys); + ptr = (void *)val; + memset(ptr, 0, size); + } + + return ptr; +} + +static bool __in_atomic_pool(void *start, size_t size) +{ + return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); +} + +static int __free_from_pool(void *start, size_t size) +{ + if (!__in_atomic_pool(start, size)) + return 0; + + gen_pool_free(atomic_pool, (unsigned long)start, size); + + return 1; +} + +static void *__dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) +{ + if (dev == NULL) { + WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); + return NULL; + } + + if (IS_ENABLED(CONFIG_ZONE_DMA) && + dev->coherent_dma_mask <= DMA_BIT_MASK(32)) + flags |= GFP_DMA; + if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { + struct page *page; + void *addr; + + page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, + get_order(size)); + if (!page) + return NULL; + + *dma_handle = phys_to_dma(dev, page_to_phys(page)); + addr = page_address(page); + memset(addr, 0, size); + return addr; + } else { + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + } +} + +static void __dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + bool freed; + phys_addr_t paddr = dma_to_phys(dev, dma_handle); + + if (dev == NULL) { + WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); + return; + } + + freed = dma_release_from_contiguous(dev, + phys_to_page(paddr), + size >> PAGE_SHIFT); + if (!freed) + swiotlb_free_coherent(dev, size, vaddr, dma_handle); +} + +static void *__dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) +{ + struct page *page; + void *ptr, *coherent_ptr; + bool coherent = is_device_dma_coherent(dev); + + size = PAGE_ALIGN(size); + + if (!coherent && !(flags & __GFP_WAIT)) { + struct page *page = NULL; + void *addr = __alloc_from_pool(size, &page, flags); + + if (addr) + *dma_handle = phys_to_dma(dev, page_to_phys(page)); + + return addr; + } + + ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); + if (!ptr) + goto no_mem; + + /* no need for non-cacheable mapping if coherent */ + if (coherent) + return ptr; + + /* remove any dirty cache lines on the kernel 
alias */ + __dma_flush_range(ptr, ptr + size); + + /* create a coherent mapping */ + page = virt_to_page(ptr); + coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, + __get_dma_pgprot(attrs, + __pgprot(PROT_NORMAL_NC), false), + NULL); + if (!coherent_ptr) + goto no_map; + + return coherent_ptr; + +no_map: + __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); +no_mem: + *dma_handle = DMA_ERROR_CODE; + return NULL; +} + +static void __dma_free(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); + + size = PAGE_ALIGN(size); + + if (!is_device_dma_coherent(dev)) { + if (__free_from_pool(vaddr, size)) + return; + vunmap(vaddr); + } + __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); +} + +static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + dma_addr_t dev_addr; + + dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); + if (!is_device_dma_coherent(dev)) + __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); + + return dev_addr; +} + + +static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + if (!is_device_dma_coherent(dev)) + __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); + swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); +} + +static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *sg; + int i, ret; + + ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs); + if (!is_device_dma_coherent(dev)) + for_each_sg(sgl, sg, ret, i) + __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), + sg->length, dir); + + return ret; +} + +static void __swiotlb_unmap_sg_attrs(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *sg; + int i; + + if (!is_device_dma_coherent(dev)) + for_each_sg(sgl, sg, nelems, i) + __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), + sg->length, dir); + swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs); +} + +static void __swiotlb_sync_single_for_cpu(struct device *dev, + dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir) +{ + if (!is_device_dma_coherent(dev)) + __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); + swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir); +} + +static void __swiotlb_sync_single_for_device(struct device *dev, + dma_addr_t dev_addr, size_t size, + enum dma_data_direction dir) +{ + swiotlb_sync_single_for_device(dev, dev_addr, size, dir); + if (!is_device_dma_coherent(dev)) + __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); +} + +static void __swiotlb_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (!is_device_dma_coherent(dev)) + for_each_sg(sgl, sg, nelems, i) + __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), + sg->length, dir); + swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir); +} + +static void __swiotlb_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct 
scatterlist *sg; + int i; + + swiotlb_sync_sg_for_device(dev, sgl, nelems, dir); + if (!is_device_dma_coherent(dev)) + for_each_sg(sgl, sg, nelems, i) + __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), + sg->length, dir); +} + +/* vma->vm_page_prot must be set appropriately before calling this function */ +static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + int ret = -ENXIO; + unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> + PAGE_SHIFT; + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; + unsigned long off = vma->vm_pgoff; + + if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + return ret; + + if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { + ret = remap_pfn_range(vma, vma->vm_start, + pfn + off, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + } + + return ret; +} + +static int __swiotlb_mmap(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs) +{ + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, + is_device_dma_coherent(dev)); + return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +static struct dma_map_ops swiotlb_dma_ops = { + .alloc = __dma_alloc, + .free = __dma_free, + .mmap = __swiotlb_mmap, + .map_page = __swiotlb_map_page, + .unmap_page = __swiotlb_unmap_page, + .map_sg = __swiotlb_map_sg_attrs, + .unmap_sg = __swiotlb_unmap_sg_attrs, + .sync_single_for_cpu = __swiotlb_sync_single_for_cpu, + .sync_single_for_device = __swiotlb_sync_single_for_device, + .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = __swiotlb_sync_sg_for_device, + .dma_supported = swiotlb_dma_supported, + .mapping_error = swiotlb_dma_mapping_error, +}; + +static int __init atomic_pool_init(void) +{ + pgprot_t prot = __pgprot(PROT_NORMAL_NC); + unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; + struct page *page; + void *addr; + unsigned int pool_size_order = get_order(atomic_pool_size); + + if (dev_get_cma_area(NULL)) + page = dma_alloc_from_contiguous(NULL, nr_pages, + pool_size_order); + else + page = alloc_pages(GFP_DMA, pool_size_order); + + if (page) { + int ret; + void *page_addr = page_address(page); + + memset(page_addr, 0, atomic_pool_size); + __dma_flush_range(page_addr, page_addr + atomic_pool_size); + + atomic_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!atomic_pool) + goto free_page; + + addr = dma_common_contiguous_remap(page, atomic_pool_size, + VM_USERMAP, prot, atomic_pool_init); + + if (!addr) + goto destroy_genpool; + + ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr, + page_to_phys(page), + atomic_pool_size, -1); + if (ret) + goto remove_mapping; + + gen_pool_set_algo(atomic_pool, + gen_pool_first_fit_order_align, + (void *)PAGE_SHIFT); + + pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", + atomic_pool_size / 1024); + return 0; + } + goto out; + +remove_mapping: + dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP); +destroy_genpool: + gen_pool_destroy(atomic_pool); + atomic_pool = NULL; +free_page: + if (!dma_release_from_contiguous(NULL, page, nr_pages)) + __free_pages(page, pool_size_order); +out: + pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", + atomic_pool_size / 1024); + return -ENOMEM; +} + +static int __init arm64_dma_init(void) +{ + int ret; + + dma_ops = &swiotlb_dma_ops; + + 
ret = atomic_pool_init(); + + return ret; +} +arch_initcall(arm64_dma_init); + +#define PREALLOC_DMA_DEBUG_ENTRIES 4096 + +static int __init dma_debug_do_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + return 0; +} +fs_initcall(dma_debug_do_init); diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c new file mode 100644 index 000000000..f3d6221cd --- /dev/null +++ b/arch/arm64/mm/dump.c @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Debug helper to dump the current kernel pagetables of the system + * so that we can see what the various memory ranges are set to. + * + * Derived from x86 and arm implementation: + * (C) Copyright 2008 Intel Corporation + * + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#include <linux/debugfs.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/seq_file.h> + +#include <asm/fixmap.h> +#include <asm/memory.h> +#include <asm/pgtable.h> +#include <asm/pgtable-hwdef.h> + +#define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS) + +struct addr_marker { + unsigned long start_address; + const char *name; +}; + +enum address_markers_idx { + VMALLOC_START_NR = 0, + VMALLOC_END_NR, +#ifdef CONFIG_SPARSEMEM_VMEMMAP + VMEMMAP_START_NR, + VMEMMAP_END_NR, +#endif + FIXADDR_START_NR, + FIXADDR_END_NR, + PCI_START_NR, + PCI_END_NR, + MODULES_START_NR, + MODUELS_END_NR, + KERNEL_SPACE_NR, +}; + +static struct addr_marker address_markers[] = { + { VMALLOC_START, "vmalloc() Area" }, + { VMALLOC_END, "vmalloc() End" }, +#ifdef CONFIG_SPARSEMEM_VMEMMAP + { 0, "vmemmap start" }, + { 0, "vmemmap end" }, +#endif + { FIXADDR_START, "Fixmap start" }, + { FIXADDR_TOP, "Fixmap end" }, + { PCI_IO_START, "PCI I/O start" }, + { PCI_IO_END, "PCI I/O end" }, + { MODULES_VADDR, "Modules start" }, + { MODULES_END, "Modules end" }, + { PAGE_OFFSET, "Kernel Mapping" }, + { -1, NULL }, +}; + +struct pg_state { + struct seq_file *seq; + const struct addr_marker *marker; + unsigned long start_address; + unsigned level; + u64 current_prot; +}; + +struct prot_bits { + u64 mask; + u64 val; + const char *set; + const char *clear; +}; + +static const struct prot_bits pte_bits[] = { + { + .mask = PTE_USER, + .val = PTE_USER, + .set = "USR", + .clear = " ", + }, { + .mask = PTE_RDONLY, + .val = PTE_RDONLY, + .set = "ro", + .clear = "RW", + }, { + .mask = PTE_PXN, + .val = PTE_PXN, + .set = "NX", + .clear = "x ", + }, { + .mask = PTE_SHARED, + .val = PTE_SHARED, + .set = "SHD", + .clear = " ", + }, { + .mask = PTE_AF, + .val = PTE_AF, + .set = "AF", + .clear = " ", + }, { + .mask = PTE_NG, + .val = PTE_NG, + .set = "NG", + .clear = " ", + }, { + .mask = PTE_UXN, + .val = PTE_UXN, + .set = "UXN", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), + .set = "DEVICE/nGnRnE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), + .set = "DEVICE/nGnRE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_GRE), + .set = "DEVICE/GRE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL_NC), + .set = "MEM/NORMAL-NC", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL), + .set = "MEM/NORMAL", + } +}; + +struct pg_level { + 
const struct prot_bits *bits; + size_t num; + u64 mask; +}; + +static struct pg_level pg_level[] = { + { + }, { /* pgd */ + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pud */ + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pmd */ + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pte */ + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, +}; + +static void dump_prot(struct pg_state *st, const struct prot_bits *bits, + size_t num) +{ + unsigned i; + + for (i = 0; i < num; i++, bits++) { + const char *s; + + if ((st->current_prot & bits->mask) == bits->val) + s = bits->set; + else + s = bits->clear; + + if (s) + seq_printf(st->seq, " %s", s); + } +} + +static void note_page(struct pg_state *st, unsigned long addr, unsigned level, + u64 val) +{ + static const char units[] = "KMGTPE"; + u64 prot = val & pg_level[level].mask; + + if (!st->level) { + st->level = level; + st->current_prot = prot; + st->start_address = addr; + seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } else if (prot != st->current_prot || level != st->level || + addr >= st->marker[1].start_address) { + const char *unit = units; + unsigned long delta; + + if (st->current_prot) { + seq_printf(st->seq, "0x%16lx-0x%16lx ", + st->start_address, addr); + + delta = (addr - st->start_address) >> 10; + while (!(delta & 1023) && unit[1]) { + delta >>= 10; + unit++; + } + seq_printf(st->seq, "%9lu%c", delta, *unit); + if (pg_level[st->level].bits) + dump_prot(st, pg_level[st->level].bits, + pg_level[st->level].num); + seq_puts(st->seq, "\n"); + } + + if (addr >= st->marker[1].start_address) { + st->marker++; + seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + + st->start_address = addr; + st->current_prot = prot; + st->level = level; + } + + if (addr >= st->marker[1].start_address) { + st->marker++; + seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + +} + +static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) +{ + pte_t *pte = pte_offset_kernel(pmd, 0); + unsigned long addr; + unsigned i; + + for (i = 0; i < PTRS_PER_PTE; i++, pte++) { + addr = start + i * PAGE_SIZE; + note_page(st, addr, 4, pte_val(*pte)); + } +} + +static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) +{ + pmd_t *pmd = pmd_offset(pud, 0); + unsigned long addr; + unsigned i; + + for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { + addr = start + i * PMD_SIZE; + if (pmd_none(*pmd) || pmd_sect(*pmd)) { + note_page(st, addr, 3, pmd_val(*pmd)); + } else { + BUG_ON(pmd_bad(*pmd)); + walk_pte(st, pmd, addr); + } + } +} + +static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) +{ + pud_t *pud = pud_offset(pgd, 0); + unsigned long addr; + unsigned i; + + for (i = 0; i < PTRS_PER_PUD; i++, pud++) { + addr = start + i * PUD_SIZE; + if (pud_none(*pud) || pud_sect(*pud)) { + note_page(st, addr, 2, pud_val(*pud)); + } else { + BUG_ON(pud_bad(*pud)); + walk_pmd(st, pud, addr); + } + } +} + +static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) +{ + pgd_t *pgd = pgd_offset(mm, 0UL); + unsigned i; + unsigned long addr; + + for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { + addr = start + i * PGDIR_SIZE; + if (pgd_none(*pgd)) { + note_page(st, addr, 1, pgd_val(*pgd)); + } else { + BUG_ON(pgd_bad(*pgd)); + walk_pud(st, pgd, addr); + } + } +} + +static int ptdump_show(struct seq_file *m, void *v) +{ + struct pg_state st = { + .seq = m, + .marker = address_markers, + }; + + walk_pgd(&st, &init_mm, LOWEST_ADDR); + + note_page(&st, 
0, 0, 0); + return 0; +} + +static int ptdump_open(struct inode *inode, struct file *file) +{ + return single_open(file, ptdump_show, NULL); +} + +static const struct file_operations ptdump_fops = { + .open = ptdump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ptdump_init(void) +{ + struct dentry *pe; + unsigned i, j; + + for (i = 0; i < ARRAY_SIZE(pg_level); i++) + if (pg_level[i].bits) + for (j = 0; j < pg_level[i].num; j++) + pg_level[i].mask |= pg_level[i].bits[j].mask; + +#ifdef CONFIG_SPARSEMEM_VMEMMAP + address_markers[VMEMMAP_START_NR].start_address = + (unsigned long)virt_to_page(PAGE_OFFSET); + address_markers[VMEMMAP_END_NR].start_address = + (unsigned long)virt_to_page(high_memory); +#endif + + pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, + &ptdump_fops); + return pe ? 0 : -ENOMEM; +} +device_initcall(ptdump_init); diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c new file mode 100644 index 000000000..79444279b --- /dev/null +++ b/arch/arm64/mm/extable.c @@ -0,0 +1,17 @@ +/* + * Based on arch/arm/mm/extable.c + */ + +#include <linux/module.h> +#include <linux/uaccess.h> + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(instruction_pointer(regs)); + if (fixup) + regs->pc = fixup->fixup; + + return fixup != NULL; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c new file mode 100644 index 000000000..96da13167 --- /dev/null +++ b/arch/arm64/mm/fault.c @@ -0,0 +1,532 @@ +/* + * Based on arch/arm/mm/fault.c + * + * Copyright (C) 1995 Linus Torvalds + * Copyright (C) 1995-2004 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/module.h> +#include <linux/signal.h> +#include <linux/mm.h> +#include <linux/hardirq.h> +#include <linux/init.h> +#include <linux/kprobes.h> +#include <linux/uaccess.h> +#include <linux/page-flags.h> +#include <linux/sched.h> +#include <linux/highmem.h> +#include <linux/perf_event.h> + +#include <asm/exception.h> +#include <asm/debug-monitors.h> +#include <asm/esr.h> +#include <asm/system_misc.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +static const char *fault_name(unsigned int esr); + +/* + * Dump out the page tables associated with 'addr' in mm 'mm'. 
+ */ +void show_pte(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + + if (!mm) + mm = &init_mm; + + pr_alert("pgd = %p\n", mm->pgd); + pgd = pgd_offset(mm, addr); + pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd)); + + do { + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (pgd_none(*pgd) || pgd_bad(*pgd)) + break; + + pud = pud_offset(pgd, addr); + printk(", *pud=%016llx", pud_val(*pud)); + if (pud_none(*pud) || pud_bad(*pud)) + break; + + pmd = pmd_offset(pud, addr); + printk(", *pmd=%016llx", pmd_val(*pmd)); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + break; + + pte = pte_offset_map(pmd, addr); + printk(", *pte=%016llx", pte_val(*pte)); + pte_unmap(pte); + } while(0); + + printk("\n"); +} + +/* + * The kernel tried to access some page that wasn't present. + */ +static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, + unsigned int esr, struct pt_regs *regs) +{ + /* + * Are we prepared to handle this kernel fault? + */ + if (fixup_exception(regs)) + return; + + /* + * No handler, we'll have to terminate things with extreme prejudice. + */ + bust_spinlocks(1); + pr_alert("Unable to handle kernel %s at virtual address %08lx\n", + (addr < PAGE_SIZE) ? "NULL pointer dereference" : + "paging request", addr); + + show_pte(mm, addr); + die("Oops", regs, esr); + bust_spinlocks(0); + do_exit(SIGKILL); +} + +/* + * Something tried to access memory that isn't in our memory map. User mode + * accesses just cause a SIGSEGV + */ +static void __do_user_fault(struct task_struct *tsk, unsigned long addr, + unsigned int esr, unsigned int sig, int code, + struct pt_regs *regs) +{ + struct siginfo si; + + if (show_unhandled_signals && unhandled_signal(tsk, sig) && + printk_ratelimit()) { + pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", + tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, + addr, esr); + show_pte(tsk->mm, addr); + show_regs(regs); + } + + tsk->thread.fault_address = addr; + tsk->thread.fault_code = esr; + si.si_signo = sig; + si.si_errno = 0; + si.si_code = code; + si.si_addr = (void __user *)addr; + force_sig_info(sig, &si, tsk); +} + +static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->active_mm; + + /* + * If we are in kernel mode at this point, we have no context to + * handle this fault with. + */ + if (user_mode(regs)) + __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs); + else + __do_kernel_fault(mm, addr, esr, regs); +} + +#define VM_FAULT_BADMAP 0x010000 +#define VM_FAULT_BADACCESS 0x020000 + +#define ESR_LNX_EXEC (1 << 24) + +static int __do_page_fault(struct mm_struct *mm, unsigned long addr, + unsigned int mm_flags, unsigned long vm_flags, + struct task_struct *tsk) +{ + struct vm_area_struct *vma; + int fault; + + vma = find_vma(mm, addr); + fault = VM_FAULT_BADMAP; + if (unlikely(!vma)) + goto out; + if (unlikely(vma->vm_start > addr)) + goto check_stack; + + /* + * Ok, we have a good vm_area for this memory access, so we can handle + * it. + */ +good_area: + /* + * Check that the permissions on the VMA allow for the fault which + * occurred. If we encountered a write or exec fault, we must have + * appropriate permissions, otherwise we allow any permission. 
+ */ + if (!(vma->vm_flags & vm_flags)) { + fault = VM_FAULT_BADACCESS; + goto out; + } + + return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags); + +check_stack: + if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) + goto good_area; +out: + return fault; +} + +static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + struct task_struct *tsk; + struct mm_struct *mm; + int fault, sig, code; + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; + unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + + tsk = current; + mm = tsk->mm; + + /* Enable interrupts if they were enabled in the parent context. */ + if (interrupts_enabled(regs)) + local_irq_enable(); + + /* + * If we're in an interrupt or have no user context, we must not take + * the fault. + */ + if (in_atomic() || !mm) + goto no_context; + + if (user_mode(regs)) + mm_flags |= FAULT_FLAG_USER; + + if (esr & ESR_LNX_EXEC) { + vm_flags = VM_EXEC; + } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) { + vm_flags = VM_WRITE; + mm_flags |= FAULT_FLAG_WRITE; + } + + /* + * As per x86, we may deadlock here. However, since the kernel only + * validly references user space from well defined areas of the code, + * we can bug out early if this is from code which shouldn't. + */ + if (!down_read_trylock(&mm->mmap_sem)) { + if (!user_mode(regs) && !search_exception_tables(regs->pc)) + goto no_context; +retry: + down_read(&mm->mmap_sem); + } else { + /* + * The above down_read_trylock() might have succeeded in which + * case, we'll have missed the might_sleep() from down_read(). + */ + might_sleep(); +#ifdef CONFIG_DEBUG_VM + if (!user_mode(regs) && !search_exception_tables(regs->pc)) + goto no_context; +#endif + } + + fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk); + + /* + * If we need to retry but a fatal signal is pending, handle the + * signal first. We do not need to release the mmap_sem because it + * would already be released in __lock_page_or_retry in mm/filemap.c. + */ + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return 0; + + /* + * Major/minor page fault accounting is only done on the initial + * attempt. If we go through a retry, it is extremely likely that the + * page will be found in page cache at that point. + */ + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) { + tsk->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, + addr); + } else { + tsk->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, + addr); + } + if (fault & VM_FAULT_RETRY) { + /* + * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of + * starvation. + */ + mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; + goto retry; + } + } + + up_read(&mm->mmap_sem); + + /* + * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR + */ + if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | + VM_FAULT_BADACCESS)))) + return 0; + + /* + * If we are in kernel mode at this point, we have no context to + * handle this fault with. + */ + if (!user_mode(regs)) + goto no_context; + + if (fault & VM_FAULT_OOM) { + /* + * We ran out of memory, call the OOM killer, and return to + * userspace (which will retry the fault, or kill us if we got + * oom-killed). + */ + pagefault_out_of_memory(); + return 0; + } + + if (fault & VM_FAULT_SIGBUS) { + /* + * We had some memory, but were unable to successfully fix up + * this page fault. 
+ */ + sig = SIGBUS; + code = BUS_ADRERR; + } else { + /* + * Something tried to access memory that isn't in our memory + * map. + */ + sig = SIGSEGV; + code = fault == VM_FAULT_BADACCESS ? + SEGV_ACCERR : SEGV_MAPERR; + } + + __do_user_fault(tsk, addr, esr, sig, code, regs); + return 0; + +no_context: + __do_kernel_fault(mm, addr, esr, regs); + return 0; +} + +/* + * First Level Translation Fault Handler + * + * We enter here because the first level page table doesn't contain a valid + * entry for the address. + * + * If the address is in kernel space (>= TASK_SIZE), then we are probably + * faulting in the vmalloc() area. + * + * If the init_task's first level page tables contains the relevant entry, we + * copy the it to this task. If not, we send the process a signal, fixup the + * exception, or oops the kernel. + * + * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt + * or a critical region, and should only copy the information from the master + * page table, nothing more. + */ +static int __kprobes do_translation_fault(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + if (addr < TASK_SIZE) + return do_page_fault(addr, esr, regs); + + do_bad_area(addr, esr, regs); + return 0; +} + +/* + * This abort handler always returns "fault". + */ +static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + return 1; +} + +static struct fault_info { + int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); + int sig; + int code; + const char *name; +} fault_info[] = { + { do_bad, SIGBUS, 0, "ttbr address size fault" }, + { do_bad, SIGBUS, 0, "level 1 address size fault" }, + { do_bad, SIGBUS, 0, "level 2 address size fault" }, + { do_bad, SIGBUS, 0, "level 3 address size fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, + { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, + { do_bad, SIGBUS, 0, "reserved access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, + { do_bad, SIGBUS, 0, "reserved permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, + { do_bad, SIGBUS, 0, "synchronous external abort" }, + { do_bad, SIGBUS, 0, "asynchronous external abort" }, + { do_bad, SIGBUS, 0, "unknown 18" }, + { do_bad, SIGBUS, 0, "unknown 19" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error" }, + { do_bad, SIGBUS, 0, "asynchronous parity error" }, + { do_bad, SIGBUS, 0, "unknown 26" }, + { do_bad, SIGBUS, 0, "unknown 27" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, 
"synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, "unknown 32" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, + { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGBUS, 0, "unknown 35" }, + { do_bad, SIGBUS, 0, "unknown 36" }, + { do_bad, SIGBUS, 0, "unknown 37" }, + { do_bad, SIGBUS, 0, "unknown 38" }, + { do_bad, SIGBUS, 0, "unknown 39" }, + { do_bad, SIGBUS, 0, "unknown 40" }, + { do_bad, SIGBUS, 0, "unknown 41" }, + { do_bad, SIGBUS, 0, "unknown 42" }, + { do_bad, SIGBUS, 0, "unknown 43" }, + { do_bad, SIGBUS, 0, "unknown 44" }, + { do_bad, SIGBUS, 0, "unknown 45" }, + { do_bad, SIGBUS, 0, "unknown 46" }, + { do_bad, SIGBUS, 0, "unknown 47" }, + { do_bad, SIGBUS, 0, "unknown 48" }, + { do_bad, SIGBUS, 0, "unknown 49" }, + { do_bad, SIGBUS, 0, "unknown 50" }, + { do_bad, SIGBUS, 0, "unknown 51" }, + { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, + { do_bad, SIGBUS, 0, "unknown 53" }, + { do_bad, SIGBUS, 0, "unknown 54" }, + { do_bad, SIGBUS, 0, "unknown 55" }, + { do_bad, SIGBUS, 0, "unknown 56" }, + { do_bad, SIGBUS, 0, "unknown 57" }, + { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, + { do_bad, SIGBUS, 0, "unknown 59" }, + { do_bad, SIGBUS, 0, "unknown 60" }, + { do_bad, SIGBUS, 0, "unknown 61" }, + { do_bad, SIGBUS, 0, "unknown 62" }, + { do_bad, SIGBUS, 0, "unknown 63" }, +}; + +static const char *fault_name(unsigned int esr) +{ + const struct fault_info *inf = fault_info + (esr & 63); + return inf->name; +} + +/* + * Dispatch a data abort to the relevant handler. + */ +asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + const struct fault_info *inf = fault_info + (esr & 63); + struct siginfo info; + + if (!inf->fn(addr, esr, regs)) + return; + + pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n", + inf->name, esr, addr); + + info.si_signo = inf->sig; + info.si_errno = 0; + info.si_code = inf->code; + info.si_addr = (void __user *)addr; + arm64_notify_die("", regs, &info, esr); +} + +/* + * Handle stack alignment exceptions. 
+ */ +asmlinkage void __exception do_sp_pc_abort(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + struct siginfo info; + + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRALN; + info.si_addr = (void __user *)addr; + arm64_notify_die("", regs, &info, esr); +} + +static struct fault_info debug_fault_info[] = { + { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" }, + { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" }, + { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" }, + { do_bad, SIGBUS, 0, "unknown 3" }, + { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" }, + { do_bad, SIGTRAP, 0, "aarch32 vector catch" }, + { do_bad, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" }, + { do_bad, SIGBUS, 0, "unknown 7" }, +}; + +void __init hook_debug_fault_code(int nr, + int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int sig, int code, const char *name) +{ + BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); + + debug_fault_info[nr].fn = fn; + debug_fault_info[nr].sig = sig; + debug_fault_info[nr].code = code; + debug_fault_info[nr].name = name; +} + +asmlinkage int __exception do_debug_exception(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr); + struct siginfo info; + + if (!inf->fn(addr, esr, regs)) + return 1; + + pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n", + inf->name, esr, addr); + + info.si_signo = inf->sig; + info.si_errno = 0; + info.si_code = inf->code; + info.si_addr = (void __user *)addr; + arm64_notify_die("", regs, &info, 0); + + return 0; +} diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c new file mode 100644 index 000000000..b6f14e8d2 --- /dev/null +++ b/arch/arm64/mm/flush.c @@ -0,0 +1,122 @@ +/* + * Based on arch/arm/mm/flush.c + * + * Copyright (C) 1995-2002 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/pagemap.h> + +#include <asm/cacheflush.h> +#include <asm/cachetype.h> +#include <asm/tlbflush.h> + +#include "mm.h" + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + if (vma->vm_flags & VM_EXEC) + __flush_icache_all(); +} + +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *kaddr, + unsigned long len) +{ + if (vma->vm_flags & VM_EXEC) { + unsigned long addr = (unsigned long)kaddr; + if (icache_is_aliasing()) { + __flush_dcache_area(kaddr, len); + __flush_icache_all(); + } else { + flush_icache_range(addr, addr + len); + } + } +} + +/* + * Copy user data from/to a page which is mapped into a different processes + * address space. Really, we want to allow our "user space" model to handle + * this. + * + * Note that this code needs to run on the current CPU. 
+ */ +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ +#ifdef CONFIG_SMP + preempt_disable(); +#endif + memcpy(dst, src, len); + flush_ptrace_access(vma, page, uaddr, dst, len); +#ifdef CONFIG_SMP + preempt_enable(); +#endif +} + +void __sync_icache_dcache(pte_t pte, unsigned long addr) +{ + struct page *page = pte_page(pte); + + /* no flushing needed for anonymous pages */ + if (!page_mapping(page)) + return; + + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { + __flush_dcache_area(page_address(page), + PAGE_SIZE << compound_order(page)); + __flush_icache_all(); + } else if (icache_is_aivivt()) { + __flush_icache_all(); + } +} + +/* + * This function is called when a page has been modified by the kernel. Mark + * it as dirty for later flushing when mapped in user space (if executable, + * see __sync_icache_dcache). + */ +void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} +EXPORT_SYMBOL(flush_dcache_page); + +/* + * Additional functions defined in assembly. + */ +EXPORT_SYMBOL(flush_cache_all); +EXPORT_SYMBOL(flush_icache_range); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = pmd_mksplitting(*pmdp); + + VM_BUG_ON(address & ~PMD_MASK); + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + + /* dummy IPI to serialise against fast_gup */ + kick_all_cpus_sync(); +} +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c new file mode 100644 index 000000000..0eeb4f093 --- /dev/null +++ b/arch/arm64/mm/hugetlbpage.c @@ -0,0 +1,68 @@ +/* + * arch/arm64/mm/hugetlbpage.c + * + * Copyright (C) 2013 Linaro Ltd. + * + * Based on arch/x86/mm/hugetlbpage.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> +#include <asm/pgalloc.h> + +#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ + return 0; +} +#endif + +int pmd_huge(pmd_t pmd) +{ + return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); +} + +int pud_huge(pud_t pud) +{ +#ifndef __PAGETABLE_PMD_FOLDED + return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT); +#else + return 0; +#endif +} + +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + if (ps == PMD_SIZE) { + hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); + } else if (ps == PUD_SIZE) { + hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); + } else { + pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20); + return 0; + } + return 1; +} +__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c new file mode 100644 index 000000000..ad87ce826 --- /dev/null +++ b/arch/arm64/mm/init.c @@ -0,0 +1,376 @@ +/* + * Based on arch/arm/mm/init.c + * + * Copyright (C) 1995-2005 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/swap.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/mman.h> +#include <linux/nodemask.h> +#include <linux/initrd.h> +#include <linux/gfp.h> +#include <linux/memblock.h> +#include <linux/sort.h> +#include <linux/of_fdt.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/efi.h> +#include <linux/swiotlb.h> + +#include <asm/fixmap.h> +#include <asm/memory.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <asm/sizes.h> +#include <asm/tlb.h> +#include <asm/alternative.h> + +#include "mm.h" + +phys_addr_t memstart_addr __read_mostly = 0; +phys_addr_t arm64_dma_phys_limit __read_mostly; + +#ifdef CONFIG_BLK_DEV_INITRD +static int __init early_initrd(char *p) +{ + unsigned long start, size; + char *endp; + + start = memparse(p, &endp); + if (*endp == ',') { + size = memparse(endp + 1, NULL); + + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(start + size); + } + return 0; +} +early_param("initrd", early_initrd); +#endif + +/* + * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It + * currently assumes that for memory starting above 4G, 32-bit devices will + * use a DMA offset. 
+ */ +static phys_addr_t max_zone_dma_phys(void) +{ + phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32); + return min(offset + (1ULL << 32), memblock_end_of_DRAM()); +} + +static void __init zone_sizes_init(unsigned long min, unsigned long max) +{ + struct memblock_region *reg; + unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; + unsigned long max_dma = min; + + memset(zone_size, 0, sizeof(zone_size)); + + /* 4GB maximum for 32-bit only capable devices */ + if (IS_ENABLED(CONFIG_ZONE_DMA)) { + max_dma = PFN_DOWN(arm64_dma_phys_limit); + zone_size[ZONE_DMA] = max_dma - min; + } + zone_size[ZONE_NORMAL] = max - max_dma; + + memcpy(zhole_size, zone_size, sizeof(zhole_size)); + + for_each_memblock(memory, reg) { + unsigned long start = memblock_region_memory_base_pfn(reg); + unsigned long end = memblock_region_memory_end_pfn(reg); + + if (start >= max) + continue; + + if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) { + unsigned long dma_end = min(end, max_dma); + zhole_size[ZONE_DMA] -= dma_end - start; + } + + if (end > max_dma) { + unsigned long normal_end = min(end, max); + unsigned long normal_start = max(start, max_dma); + zhole_size[ZONE_NORMAL] -= normal_end - normal_start; + } + } + + free_area_init_node(0, zone_size, min, zhole_size); +} + +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +int pfn_valid(unsigned long pfn) +{ + return memblock_is_memory(pfn << PAGE_SHIFT); +} +EXPORT_SYMBOL(pfn_valid); +#endif + +#ifndef CONFIG_SPARSEMEM +static void arm64_memory_present(void) +{ +} +#else +static void arm64_memory_present(void) +{ + struct memblock_region *reg; + + for_each_memblock(memory, reg) + memory_present(0, memblock_region_memory_base_pfn(reg), + memblock_region_memory_end_pfn(reg)); +} +#endif + +static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX; + +/* + * Limit the memory size that was specified via FDT. + */ +static int __init early_mem(char *p) +{ + if (!p) + return 1; + + memory_limit = memparse(p, &p) & PAGE_MASK; + pr_notice("Memory limited to %lldMB\n", memory_limit >> 20); + + return 0; +} +early_param("mem", early_mem); + +void __init arm64_memblock_init(void) +{ + memblock_enforce_memory_limit(memory_limit); + + /* + * Register the kernel text, kernel data, initrd, and initial + * pagetables with memblock. + */ + memblock_reserve(__pa(_text), _end - _text); +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); +#endif + + early_init_fdt_scan_reserved_mem(); + + /* 4GB maximum for 32-bit only capable devices */ + if (IS_ENABLED(CONFIG_ZONE_DMA)) + arm64_dma_phys_limit = max_zone_dma_phys(); + else + arm64_dma_phys_limit = PHYS_MASK + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); + + memblock_allow_resize(); + memblock_dump_all(); +} + +void __init bootmem_init(void) +{ + unsigned long min, max; + + min = PFN_UP(memblock_start_of_DRAM()); + max = PFN_DOWN(memblock_end_of_DRAM()); + + early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT); + + /* + * Sparsemem tries to allocate bootmem in memory_present(), so must be + * done after the fixed reservations. + */ + arm64_memory_present(); + + sparse_init(); + zone_sizes_init(min, max); + + high_memory = __va((max << PAGE_SHIFT) - 1) + 1; + max_pfn = max_low_pfn = max; +} + +#ifndef CONFIG_SPARSEMEM_VMEMMAP +static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) +{ + struct page *start_pg, *end_pg; + unsigned long pg, pgend; + + /* + * Convert start_pfn/end_pfn to a struct page pointer. 
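+ *
+ * Note that only whole, completely unused pages of the memmap array can be
+ * returned to the allocator, which is why the range is page-aligned inwards
+ * (start rounded up, end rounded down) before free_bootmem() is called below.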
+ */ + start_pg = pfn_to_page(start_pfn - 1) + 1; + end_pg = pfn_to_page(end_pfn - 1) + 1; + + /* + * Convert to physical addresses, and round start upwards and end + * downwards. + */ + pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); + pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; + + /* + * If there are free pages between these, free the section of the + * memmap array. + */ + if (pg < pgend) + free_bootmem(pg, pgend - pg); +} + +/* + * The mem_map array can get very big. Free the unused area of the memory map. + */ +static void __init free_unused_memmap(void) +{ + unsigned long start, prev_end = 0; + struct memblock_region *reg; + + for_each_memblock(memory, reg) { + start = __phys_to_pfn(reg->base); + +#ifdef CONFIG_SPARSEMEM + /* + * Take care not to free memmap entries that don't exist due + * to SPARSEMEM sections which aren't present. + */ + start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); +#endif + /* + * If we had a previous bank, and there is a space between the + * current bank and the previous, free it. + */ + if (prev_end && prev_end < start) + free_memmap(prev_end, start); + + /* + * Align up here since the VM subsystem insists that the + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. + */ + prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), + MAX_ORDER_NR_PAGES); + } + +#ifdef CONFIG_SPARSEMEM + if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) + free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); +#endif +} +#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ + +/* + * mem_init() marks the free areas in the mem_map and tells us how much memory + * is free. This is done after various parts of the system have claimed their + * memory after the kernel image. + */ +void __init mem_init(void) +{ + swiotlb_init(1); + + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); + +#ifndef CONFIG_SPARSEMEM_VMEMMAP + free_unused_memmap(); +#endif + /* this will put all unused low memory onto the freelists */ + free_all_bootmem(); + + mem_init_print_info(NULL); + +#define MLK(b, t) b, t, ((t) - (b)) >> 10 +#define MLM(b, t) b, t, ((t) - (b)) >> 20 +#define MLG(b, t) b, t, ((t) - (b)) >> 30 +#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) + + pr_notice("Virtual kernel memory layout:\n" + " vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n" +#ifdef CONFIG_SPARSEMEM_VMEMMAP + " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" + " 0x%16lx - 0x%16lx (%6ld MB actual)\n" +#endif + " fixed : 0x%16lx - 0x%16lx (%6ld KB)\n" + " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n" + " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" + " memory : 0x%16lx - 0x%16lx (%6ld MB)\n" + " .init : 0x%p" " - 0x%p" " (%6ld KB)\n" + " .text : 0x%p" " - 0x%p" " (%6ld KB)\n" + " .data : 0x%p" " - 0x%p" " (%6ld KB)\n", + MLG(VMALLOC_START, VMALLOC_END), +#ifdef CONFIG_SPARSEMEM_VMEMMAP + MLG((unsigned long)vmemmap, + (unsigned long)vmemmap + VMEMMAP_SIZE), + MLM((unsigned long)virt_to_page(PAGE_OFFSET), + (unsigned long)virt_to_page(high_memory)), +#endif + MLK(FIXADDR_START, FIXADDR_TOP), + MLM(PCI_IO_START, PCI_IO_END), + MLM(MODULES_VADDR, MODULES_END), + MLM(PAGE_OFFSET, (unsigned long)high_memory), + MLK_ROUNDUP(__init_begin, __init_end), + MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(_sdata, _edata)); + +#undef MLK +#undef MLM +#undef MLK_ROUNDUP + + /* + * Check boundaries twice: Some fundamental inconsistencies can be + * detected at build time already. 
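+ *
+ * BUILD_BUG_ON() rejects a broken configuration at compile time when the
+ * condition is a compile-time constant; the BUG_ON() that follows repeats
+ * the same check at boot for cases that only become computable at runtime.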
+ */ +#ifdef CONFIG_COMPAT + BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); +#endif + BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR); + BUG_ON(TASK_SIZE_64 > MODULES_VADDR); + + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { + extern int sysctl_overcommit_memory; + /* + * On a machine this small we won't get anywhere without + * overcommit, so turn it on by default. + */ + sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; + } +} + +void free_initmem(void) +{ + fixup_init(); + free_initmem_default(0); + free_alternatives_memory(); +} + +#ifdef CONFIG_BLK_DEV_INITRD + +static int keep_initrd; + +void free_initrd_mem(unsigned long start, unsigned long end) +{ + if (!keep_initrd) + free_reserved_area((void *)start, (void *)end, 0, "initrd"); +} + +static int __init keepinitrd_setup(char *__unused) +{ + keep_initrd = 1; + return 1; +} + +__setup("keepinitrd", keepinitrd_setup); +#endif diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c new file mode 100644 index 000000000..01e88c8bc --- /dev/null +++ b/arch/arm64/mm/ioremap.c @@ -0,0 +1,113 @@ +/* + * Based on arch/arm/mm/ioremap.c + * + * (C) Copyright 1995 1996 Linus Torvalds + * Hacked for ARM by Phil Blundell <philb@gnu.org> + * Hacked to allow all architectures to build, and various cleanups + * by Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/io.h> + +#include <asm/fixmap.h> +#include <asm/tlbflush.h> +#include <asm/pgalloc.h> + +static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, + pgprot_t prot, void *caller) +{ + unsigned long last_addr; + unsigned long offset = phys_addr & ~PAGE_MASK; + int err; + unsigned long addr; + struct vm_struct *area; + + /* + * Page align the mapping address and size, taking account of any + * offset. + */ + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(size + offset); + + /* + * Don't allow wraparound, zero size or outside PHYS_MASK. + */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK)) + return NULL; + + /* + * Don't allow RAM to be mapped. + */ + if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr)))) + return NULL; + + area = get_vm_area_caller(size, VM_IOREMAP, caller); + if (!area) + return NULL; + addr = (unsigned long)area->addr; + area->phys_addr = phys_addr; + + err = ioremap_page_range(addr, addr + size, phys_addr, prot); + if (err) { + vunmap((void *)addr); + return NULL; + } + + return (void __iomem *)(offset + addr); +} + +void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot) +{ + return __ioremap_caller(phys_addr, size, prot, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(__ioremap); + +void __iounmap(volatile void __iomem *io_addr) +{ + unsigned long addr = (unsigned long)io_addr & PAGE_MASK; + + /* + * We could get an address outside vmalloc range in case + * of ioremap_cache() reusing a RAM mapping. 
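+ * In that case ioremap_cache() hands back the existing linear mapping
+ * (__phys_to_virt()) rather than a vmalloc area, so only addresses that
+ * really came from get_vm_area_caller() - i.e. those inside the vmalloc
+ * range - may be passed to vunmap().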
+ */ + if (VMALLOC_START <= addr && addr < VMALLOC_END) + vunmap((void *)addr); +} +EXPORT_SYMBOL(__iounmap); + +void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) +{ + /* For normal memory we already have a cacheable mapping. */ + if (pfn_valid(__phys_to_pfn(phys_addr))) + return (void __iomem *)__phys_to_virt(phys_addr); + + return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL), + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); + +/* + * Must be called after early_fixmap_init + */ +void __init early_ioremap_init(void) +{ + early_ioremap_setup(); +} diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h new file mode 100644 index 000000000..ef47d99b5 --- /dev/null +++ b/arch/arm64/mm/mm.h @@ -0,0 +1,3 @@ +extern void __init bootmem_init(void); + +void fixup_init(void); diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c new file mode 100644 index 000000000..ed177475d --- /dev/null +++ b/arch/arm64/mm/mmap.c @@ -0,0 +1,138 @@ +/* + * Based on arch/arm/mm/mmap.c + * + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/elf.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/export.h> +#include <linux/shm.h> +#include <linux/sched.h> +#include <linux/io.h> +#include <linux/personality.h> +#include <linux/random.h> + +#include <asm/cputype.h> + +/* + * Leave enough space between the mmap area and the stack to honour ulimit in + * the face of randomisation. + */ +#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1)) +#define MAX_GAP (STACK_TOP/6*5) + +static int mmap_is_legacy(void) +{ + if (current->personality & ADDR_COMPAT_LAYOUT) + return 1; + + if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) + return 1; + + return sysctl_legacy_va_layout; +} + +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + + rnd = (unsigned long)get_random_int() & STACK_RND_MASK; + + return rnd << PAGE_SHIFT; +} + +static unsigned long mmap_base(unsigned long rnd) +{ + unsigned long gap = rlimit(RLIMIT_STACK); + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + + return PAGE_ALIGN(STACK_TOP - gap - rnd); +} + +/* + * This function, called very early during the creation of a new process VM + * image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + + /* + * Fall back to the standard layout if the personality bit is set, or + * if the expected stack growth is unlimited: + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + mm->mmap_base = mmap_base(random_factor); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } +} +EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); + + +/* + * You really shouldn't be using read() or write() on /dev/mem. 
This might go + * away in the future. + */ +int valid_phys_addr_range(phys_addr_t addr, size_t size) +{ + if (addr < PHYS_OFFSET) + return 0; + if (addr + size > __pa(high_memory - 1) + 1) + return 0; + + return 1; +} + +/* + * Do not allow /dev/mem mappings beyond the supported physical range. + */ +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); +} + +#ifdef CONFIG_STRICT_DEVMEM + +#include <linux/ioport.h> + +/* + * devmem_is_allowed() checks to see if /dev/mem access to a certain address + * is valid. The argument is a physical page number. We mimic x86 here by + * disallowing access to system RAM as well as device-exclusive MMIO regions. + * This effectively disable read()/write() on /dev/mem. + */ +int devmem_is_allowed(unsigned long pfn) +{ + if (iomem_is_exclusive(pfn << PAGE_SHIFT)) + return 0; + if (!page_is_ram(pfn)) + return 1; + return 0; +} + +#endif diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c new file mode 100644 index 000000000..5b8b66442 --- /dev/null +++ b/arch/arm64/mm/mmu.c @@ -0,0 +1,645 @@ +/* + * Based on arch/arm/mm/mmu.c + * + * Copyright (C) 1995-2005 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/mman.h> +#include <linux/nodemask.h> +#include <linux/memblock.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/stop_machine.h> + +#include <asm/cputype.h> +#include <asm/fixmap.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <asm/sizes.h> +#include <asm/tlb.h> +#include <asm/memblock.h> +#include <asm/mmu_context.h> + +#include "mm.h" + +u64 idmap_t0sz = TCR_T0SZ(VA_BITS); + +/* + * Empty_zero_page is a special page that is used for zero-initialized data + * and COW. 
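+ * Anonymous read faults map this single page read-only wherever zeroed
+ * memory is expected; the first write triggers copy-on-write into a
+ * private page, so the zero page itself is never dirtied.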
+ */ +struct page *empty_zero_page; +EXPORT_SYMBOL(empty_zero_page); + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) + return pgprot_noncached(vma_prot); + else if (file->f_flags & O_SYNC) + return pgprot_writecombine(vma_prot); + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); + +static void __init *early_alloc(unsigned long sz) +{ + void *ptr = __va(memblock_alloc(sz, sz)); + BUG_ON(!ptr); + memset(ptr, 0, sz); + return ptr; +} + +/* + * remap a PMD into pages + */ +static void split_pmd(pmd_t *pmd, pte_t *pte) +{ + unsigned long pfn = pmd_pfn(*pmd); + int i = 0; + + do { + /* + * Need to have the least restrictive permissions available + * permissions will be fixed up later + */ + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); + pfn++; + } while (pte++, i++, i < PTRS_PER_PTE); +} + +static void alloc_init_pte(pmd_t *pmd, unsigned long addr, + unsigned long end, unsigned long pfn, + pgprot_t prot, + void *(*alloc)(unsigned long size)) +{ + pte_t *pte; + + if (pmd_none(*pmd) || pmd_sect(*pmd)) { + pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); + if (pmd_sect(*pmd)) + split_pmd(pmd, pte); + __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); + flush_tlb_all(); + } + BUG_ON(pmd_bad(*pmd)); + + pte = pte_offset_kernel(pmd, addr); + do { + set_pte(pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +void split_pud(pud_t *old_pud, pmd_t *pmd) +{ + unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT; + pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr); + int i = 0; + + do { + set_pmd(pmd, __pmd(addr | prot)); + addr += PMD_SIZE; + } while (pmd++, i++, i < PTRS_PER_PMD); +} + +static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, + unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + void *(*alloc)(unsigned long size)) +{ + pmd_t *pmd; + unsigned long next; + + /* + * Check for initial section mappings in the pgd/pud and remove them. + */ + if (pud_none(*pud) || pud_sect(*pud)) { + pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t)); + if (pud_sect(*pud)) { + /* + * need to have the 1G of mappings continue to be + * present + */ + split_pud(pud, pmd); + } + pud_populate(mm, pud, pmd); + flush_tlb_all(); + } + BUG_ON(pud_bad(*pud)); + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + /* try section mapping first */ + if (((addr | next | phys) & ~SECTION_MASK) == 0) { + pmd_t old_pmd =*pmd; + set_pmd(pmd, __pmd(phys | + pgprot_val(mk_sect_prot(prot)))); + /* + * Check for previous table entries created during + * boot (__create_page_tables) and flush them. 
+ */ + if (!pmd_none(old_pmd)) { + flush_tlb_all(); + if (pmd_table(old_pmd)) { + phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0)); + if (!WARN_ON_ONCE(slab_is_available())) + memblock_free(table, PAGE_SIZE); + } + } + } else { + alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), + prot, alloc); + } + phys += next - addr; + } while (pmd++, addr = next, addr != end); +} + +static inline bool use_1G_block(unsigned long addr, unsigned long next, + unsigned long phys) +{ + if (PAGE_SHIFT != 12) + return false; + + if (((addr | next | phys) & ~PUD_MASK) != 0) + return false; + + return true; +} + +static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, + unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + void *(*alloc)(unsigned long size)) +{ + pud_t *pud; + unsigned long next; + + if (pgd_none(*pgd)) { + pud = alloc(PTRS_PER_PUD * sizeof(pud_t)); + pgd_populate(mm, pgd, pud); + } + BUG_ON(pgd_bad(*pgd)); + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + + /* + * For 4K granule only, attempt to put down a 1GB block + */ + if (use_1G_block(addr, next, phys)) { + pud_t old_pud = *pud; + set_pud(pud, __pud(phys | + pgprot_val(mk_sect_prot(prot)))); + + /* + * If we have an old value for a pud, it will + * be pointing to a pmd table that we no longer + * need (from swapper_pg_dir). + * + * Look up the old pmd table and free it. + */ + if (!pud_none(old_pud)) { + flush_tlb_all(); + if (pud_table(old_pud)) { + phys_addr_t table = __pa(pmd_offset(&old_pud, 0)); + if (!WARN_ON_ONCE(slab_is_available())) + memblock_free(table, PAGE_SIZE); + } + } + } else { + alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc); + } + phys += next - addr; + } while (pud++, addr = next, addr != end); +} + +/* + * Create the page directory entries and any necessary page tables for the + * mapping specified by 'md'. 
+ */ +static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, + phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot, + void *(*alloc)(unsigned long size)) +{ + unsigned long addr, length, end, next; + + addr = virt & PAGE_MASK; + length = PAGE_ALIGN(size + (virt & ~PAGE_MASK)); + + end = addr + length; + do { + next = pgd_addr_end(addr, end); + alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc); + phys += next - addr; + } while (pgd++, addr = next, addr != end); +} + +static void *late_alloc(unsigned long size) +{ + void *ptr; + + BUG_ON(size > PAGE_SIZE); + ptr = (void *)__get_free_page(PGALLOC_GFP); + BUG_ON(!ptr); + return ptr; +} + +static void __ref create_mapping(phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot) +{ + if (virt < VMALLOC_START) { + pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", + &phys, virt); + return; + } + __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt, + size, prot, early_alloc); +} + +void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot) +{ + __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, + late_alloc); +} + +static void create_mapping_late(phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot) +{ + if (virt < VMALLOC_START) { + pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", + &phys, virt); + return; + } + + return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), + phys, virt, size, prot, late_alloc); +} + +#ifdef CONFIG_DEBUG_RODATA +static void __init __map_memblock(phys_addr_t start, phys_addr_t end) +{ + /* + * Set up the executable regions using the existing section mappings + * for now. This will get more fine grained later once all memory + * is mapped + */ + unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); + unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); + + if (end < kernel_x_start) { + create_mapping(start, __phys_to_virt(start), + end - start, PAGE_KERNEL); + } else if (start >= kernel_x_end) { + create_mapping(start, __phys_to_virt(start), + end - start, PAGE_KERNEL); + } else { + if (start < kernel_x_start) + create_mapping(start, __phys_to_virt(start), + kernel_x_start - start, + PAGE_KERNEL); + create_mapping(kernel_x_start, + __phys_to_virt(kernel_x_start), + kernel_x_end - kernel_x_start, + PAGE_KERNEL_EXEC); + if (kernel_x_end < end) + create_mapping(kernel_x_end, + __phys_to_virt(kernel_x_end), + end - kernel_x_end, + PAGE_KERNEL); + } + +} +#else +static void __init __map_memblock(phys_addr_t start, phys_addr_t end) +{ + create_mapping(start, __phys_to_virt(start), end - start, + PAGE_KERNEL_EXEC); +} +#endif + +static void __init map_mem(void) +{ + struct memblock_region *reg; + phys_addr_t limit; + + /* + * Temporarily limit the memblock range. We need to do this as + * create_mapping requires puds, pmds and ptes to be allocated from + * memory addressable from the initial direct kernel mapping. + * + * The initial direct kernel mapping, located at swapper_pg_dir, gives + * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from + * PHYS_OFFSET (which must be aligned to 2MB as per + * Documentation/arm64/booting.txt). 
+ */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) + limit = PHYS_OFFSET + PMD_SIZE; + else + limit = PHYS_OFFSET + PUD_SIZE; + memblock_set_current_limit(limit); + + /* map all the memory banks */ + for_each_memblock(memory, reg) { + phys_addr_t start = reg->base; + phys_addr_t end = start + reg->size; + + if (start >= end) + break; + +#ifndef CONFIG_ARM64_64K_PAGES + /* + * For the first memory bank align the start address and + * current memblock limit to prevent create_mapping() from + * allocating pte page tables from unmapped memory. + * When 64K pages are enabled, the pte page table for the + * first PGDIR_SIZE is already present in swapper_pg_dir. + */ + if (start < limit) + start = ALIGN(start, PMD_SIZE); + if (end < limit) { + limit = end & PMD_MASK; + memblock_set_current_limit(limit); + } +#endif + __map_memblock(start, end); + } + + /* Limit no longer required. */ + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); +} + +void __init fixup_executable(void) +{ +#ifdef CONFIG_DEBUG_RODATA + /* now that we are actually fully mapped, make the start/end more fine grained */ + if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { + unsigned long aligned_start = round_down(__pa(_stext), + SECTION_SIZE); + + create_mapping(aligned_start, __phys_to_virt(aligned_start), + __pa(_stext) - aligned_start, + PAGE_KERNEL); + } + + if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { + unsigned long aligned_end = round_up(__pa(__init_end), + SECTION_SIZE); + create_mapping(__pa(__init_end), (unsigned long)__init_end, + aligned_end - __pa(__init_end), + PAGE_KERNEL); + } +#endif +} + +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void) +{ + create_mapping_late(__pa(_stext), (unsigned long)_stext, + (unsigned long)_etext - (unsigned long)_stext, + PAGE_KERNEL_EXEC | PTE_RDONLY); + +} +#endif + +void fixup_init(void) +{ + create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin, + (unsigned long)__init_end - (unsigned long)__init_begin, + PAGE_KERNEL); +} + +/* + * paging_init() sets up the page tables, initialises the zone memory + * maps and sets up the zero page. + */ +void __init paging_init(void) +{ + void *zero_page; + + map_mem(); + fixup_executable(); + + /* allocate the zero page. */ + zero_page = early_alloc(PAGE_SIZE); + + bootmem_init(); + + empty_zero_page = virt_to_page(zero_page); + + /* + * TTBR0 is only used for the identity mapping at this stage. Make it + * point to zero page to avoid speculatively fetching new entries. + */ + cpu_set_reserved_ttbr0(); + flush_tlb_all(); + cpu_set_default_tcr_t0sz(); +} + +/* + * Enable the identity mapping to allow the MMU disabling. + */ +void setup_mm_for_reboot(void) +{ + cpu_set_reserved_ttbr0(); + flush_tlb_all(); + cpu_set_idmap_tcr_t0sz(); + cpu_switch_mm(idmap_pg_dir, &init_mm); +} + +/* + * Check whether a kernel address is valid (derived from arch/x86/). 
+ */ +int kern_addr_valid(unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if ((((long)addr) >> VA_BITS) != -1UL) + return 0; + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return 0; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return 0; + + if (pud_sect(*pud)) + return pfn_valid(pud_pfn(*pud)); + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return 0; + + if (pmd_sect(*pmd)) + return pfn_valid(pmd_pfn(*pmd)); + + pte = pte_offset_kernel(pmd, addr); + if (pte_none(*pte)) + return 0; + + return pfn_valid(pte_pfn(*pte)); +} +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#ifdef CONFIG_ARM64_64K_PAGES +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) +{ + return vmemmap_populate_basepages(start, end, node); +} +#else /* !CONFIG_ARM64_64K_PAGES */ +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) +{ + unsigned long addr = start; + unsigned long next; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + do { + next = pmd_addr_end(addr, end); + + pgd = vmemmap_pgd_populate(addr, node); + if (!pgd) + return -ENOMEM; + + pud = vmemmap_pud_populate(pgd, addr, node); + if (!pud) + return -ENOMEM; + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + void *p = NULL; + + p = vmemmap_alloc_block_buf(PMD_SIZE, node); + if (!p) + return -ENOMEM; + + set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL)); + } else + vmemmap_verify((pte_t *)pmd, node, addr, next); + } while (addr = next, addr != end); + + return 0; +} +#endif /* CONFIG_ARM64_64K_PAGES */ +void vmemmap_free(unsigned long start, unsigned long end) +{ +} +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ + +static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; +#if CONFIG_PGTABLE_LEVELS > 2 +static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; +#endif +#if CONFIG_PGTABLE_LEVELS > 3 +static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; +#endif + +static inline pud_t * fixmap_pud(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + + BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); + + return pud_offset(pgd, addr); +} + +static inline pmd_t * fixmap_pmd(unsigned long addr) +{ + pud_t *pud = fixmap_pud(addr); + + BUG_ON(pud_none(*pud) || pud_bad(*pud)); + + return pmd_offset(pud, addr); +} + +static inline pte_t * fixmap_pte(unsigned long addr) +{ + pmd_t *pmd = fixmap_pmd(addr); + + BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd)); + + return pte_offset_kernel(pmd, addr); +} + +void __init early_fixmap_init(void) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + unsigned long addr = FIXADDR_START; + + pgd = pgd_offset_k(addr); + pgd_populate(&init_mm, pgd, bm_pud); + pud = pud_offset(pgd, addr); + pud_populate(&init_mm, pud, bm_pmd); + pmd = pmd_offset(pud, addr); + pmd_populate_kernel(&init_mm, pmd, bm_pte); + + /* + * The boot-ioremap range spans multiple pmds, for which + * we are not preparted: + */ + BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) + != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); + + if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) + || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { + WARN_ON(1); + pr_warn("pmd %p != %p, %p\n", + pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), + fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); + pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", + fix_to_virt(FIX_BTMAP_BEGIN)); + pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", + fix_to_virt(FIX_BTMAP_END)); + + pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); + pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); + } +} + +void __set_fixmap(enum 
fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *pte; + + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + + pte = fixmap_pte(addr); + + if (pgprot_val(flags)) { + set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); + } else { + pte_clear(&init_mm, addr, pte); + flush_tlb_kernel_range(addr, addr+PAGE_SIZE); + } +} diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c new file mode 100644 index 000000000..e47ed1c5d --- /dev/null +++ b/arch/arm64/mm/pageattr.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/sched.h> + +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, + void *data) +{ + struct page_change_data *cdata = data; + pte_t pte = *ptep; + + pte = clear_pte_bit(pte, cdata->clear_mask); + pte = set_pte_bit(pte, cdata->set_mask); + + set_pte(ptep, pte); + return 0; +} + +static int change_memory_common(unsigned long addr, int numpages, + pgprot_t set_mask, pgprot_t clear_mask) +{ + unsigned long start = addr; + unsigned long size = PAGE_SIZE*numpages; + unsigned long end = start + size; + int ret; + struct page_change_data data; + + if (!IS_ALIGNED(addr, PAGE_SIZE)) { + start &= PAGE_MASK; + end = start + size; + WARN_ON_ONCE(1); + } + + if (start < MODULES_VADDR || start >= MODULES_END) + return -EINVAL; + + if (end < MODULES_VADDR || end >= MODULES_END) + return -EINVAL; + + data.set_mask = set_mask; + data.clear_mask = clear_mask; + + ret = apply_to_page_range(&init_mm, start, size, change_page_range, + &data); + + flush_tlb_kernel_range(start, end); + return ret; +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_RDONLY), + __pgprot(PTE_WRITE)); +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_WRITE), + __pgprot(PTE_RDONLY)); +} + +int set_memory_nx(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_PXN), + __pgprot(0)); +} +EXPORT_SYMBOL_GPL(set_memory_nx); + +int set_memory_x(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(0), + __pgprot(PTE_PXN)); +} +EXPORT_SYMBOL_GPL(set_memory_x); diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c new file mode 100644 index 000000000..71ca104f9 --- /dev/null +++ b/arch/arm64/mm/pgd.c @@ -0,0 +1,61 @@ +/* + * PGD allocation/freeing + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/mm.h> +#include <linux/gfp.h> +#include <linux/highmem.h> +#include <linux/slab.h> + +#include <asm/pgalloc.h> +#include <asm/page.h> +#include <asm/tlbflush.h> + +#include "mm.h" + +#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) + +static struct kmem_cache *pgd_cache; + +pgd_t *pgd_alloc(struct mm_struct *mm) +{ + if (PGD_SIZE == PAGE_SIZE) + return (pgd_t *)__get_free_page(PGALLOC_GFP); + else + return kmem_cache_alloc(pgd_cache, PGALLOC_GFP); +} + +void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + if (PGD_SIZE == PAGE_SIZE) + free_page((unsigned long)pgd); + else + kmem_cache_free(pgd_cache, pgd); +} + +static int __init pgd_cache_init(void) +{ + /* + * Naturally aligned pgds required by the architecture. + */ + if (PGD_SIZE != PAGE_SIZE) + pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE, + SLAB_PANIC, NULL); + return 0; +} +core_initcall(pgd_cache_init); diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S new file mode 100644 index 000000000..4c4d93c4b --- /dev/null +++ b/arch/arm64/mm/proc-macros.S @@ -0,0 +1,64 @@ +/* + * Based on arch/arm/mm/proc-macros.S + * + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> + +/* + * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) + */ + .macro vma_vm_mm, rd, rn + ldr \rd, [\rn, #VMA_VM_MM] + .endm + +/* + * mmid - get context id from mm pointer (mm->context.id) + */ + .macro mmid, rd, rn + ldr \rd, [\rn, #MM_CONTEXT_ID] + .endm + +/* + * dcache_line_size - get the minimum D-cache line size from the CTR register. + */ + .macro dcache_line_size, reg, tmp + mrs \tmp, ctr_el0 // read CTR + ubfm \tmp, \tmp, #16, #19 // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * icache_line_size - get the minimum I-cache line size from the CTR register. 
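+ *
+ * CTR_EL0.IminLine (bits [3:0]) encodes log2 of the line length in 4-byte
+ * words, so e.g. an encoding of 4 gives 4 << 4 = 64 bytes. DminLine, used
+ * by dcache_line_size above, lives in bits [19:16] with the same format.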
+ */ + .macro icache_line_size, reg, tmp + mrs \tmp, ctr_el0 // read CTR + and \tmp, \tmp, #0xf // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map + */ + .macro tcr_set_idmap_t0sz, valreg, tmpreg +#ifndef CONFIG_ARM64_VA_BITS_48 + ldr_l \tmpreg, idmap_t0sz + bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH +#endif + .endm diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S new file mode 100644 index 000000000..cdd754e19 --- /dev/null +++ b/arch/arm64/mm/proc.S @@ -0,0 +1,264 @@ +/* + * Based on arch/arm/mm/proc.S + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/asm-offsets.h> +#include <asm/hwcap.h> +#include <asm/pgtable-hwdef.h> +#include <asm/pgtable.h> + +#include "proc-macros.S" + +#ifdef CONFIG_ARM64_64K_PAGES +#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K +#else +#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K +#endif + +#ifdef CONFIG_SMP +#define TCR_SMP_FLAGS TCR_SHARED +#else +#define TCR_SMP_FLAGS 0 +#endif + +/* PTWs cacheable, inner/outer WBWA */ +#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA + +#define MAIR(attr, mt) ((attr) << ((mt) * 8)) + +/* + * cpu_cache_off() + * + * Turn the CPU D-cache off. + */ +ENTRY(cpu_cache_off) + mrs x0, sctlr_el1 + bic x0, x0, #1 << 2 // clear SCTLR.C + msr sctlr_el1, x0 + isb + ret +ENDPROC(cpu_cache_off) + +/* + * cpu_reset(loc) + * + * Perform a soft reset of the system. Put the CPU into the same state + * as it would be if it had been reset, and branch to what would be the + * reset vector. It must be executed with the flat identity mapping. + * + * - loc - location to jump to for soft reset + */ + .align 5 +ENTRY(cpu_reset) + mrs x1, sctlr_el1 + bic x1, x1, #1 + msr sctlr_el1, x1 // disable the MMU + isb + ret x0 +ENDPROC(cpu_reset) + +ENTRY(cpu_soft_restart) + /* Save address of cpu_reset() and reset address */ + mov x19, x0 + mov x20, x1 + + /* Turn D-cache off */ + bl cpu_cache_off + + /* Push out all dirty data, and ensure cache is empty */ + bl flush_cache_all + + mov x0, x20 + ret x19 +ENDPROC(cpu_soft_restart) + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). 
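+ *
+ * The dsb ahead of the wfi below ensures that all outstanding memory
+ * accesses have completed before the core is allowed to drop into a
+ * low-power state.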
+ */ +ENTRY(cpu_do_idle) + dsb sy // WFI may enter a low-power mode + wfi + ret +ENDPROC(cpu_do_idle) + +#ifdef CONFIG_CPU_PM +/** + * cpu_do_suspend - save CPU registers context + * + * x0: virtual address of context pointer + */ +ENTRY(cpu_do_suspend) + mrs x2, tpidr_el0 + mrs x3, tpidrro_el0 + mrs x4, contextidr_el1 + mrs x5, mair_el1 + mrs x6, cpacr_el1 + mrs x7, ttbr1_el1 + mrs x8, tcr_el1 + mrs x9, vbar_el1 + mrs x10, mdscr_el1 + mrs x11, oslsr_el1 + mrs x12, sctlr_el1 + stp x2, x3, [x0] + stp x4, x5, [x0, #16] + stp x6, x7, [x0, #32] + stp x8, x9, [x0, #48] + stp x10, x11, [x0, #64] + str x12, [x0, #80] + ret +ENDPROC(cpu_do_suspend) + +/** + * cpu_do_resume - restore CPU register context + * + * x0: Physical address of context pointer + * x1: ttbr0_el1 to be restored + * + * Returns: + * sctlr_el1 value in x0 + */ +ENTRY(cpu_do_resume) + /* + * Invalidate local tlb entries before turning on MMU + */ + tlbi vmalle1 + ldp x2, x3, [x0] + ldp x4, x5, [x0, #16] + ldp x6, x7, [x0, #32] + ldp x8, x9, [x0, #48] + ldp x10, x11, [x0, #64] + ldr x12, [x0, #80] + msr tpidr_el0, x2 + msr tpidrro_el0, x3 + msr contextidr_el1, x4 + msr mair_el1, x5 + msr cpacr_el1, x6 + msr ttbr0_el1, x1 + msr ttbr1_el1, x7 + tcr_set_idmap_t0sz x8, x7 + msr tcr_el1, x8 + msr vbar_el1, x9 + msr mdscr_el1, x10 + /* + * Restore oslsr_el1 by writing oslar_el1 + */ + ubfx x11, x11, #1, #1 + msr oslar_el1, x11 + mov x0, x12 + dsb nsh // Make sure local tlb invalidation completed + isb + ret +ENDPROC(cpu_do_resume) +#endif + +/* + * cpu_do_switch_mm(pgd_phys, tsk) + * + * Set the translation table base pointer to be pgd_phys. + * + * - pgd_phys - physical address of new TTB + */ +ENTRY(cpu_do_switch_mm) + mmid w1, x1 // get mm->context.id + bfi x0, x1, #48, #16 // set the ASID + msr ttbr0_el1, x0 // set TTBR0 + isb + ret +ENDPROC(cpu_do_switch_mm) + + .section ".text.init", #alloc, #execinstr + +/* + * __cpu_setup + * + * Initialise the processor for turning the MMU on. Return in x0 the + * value of the SCTLR_EL1 register. + */ +ENTRY(__cpu_setup) + ic iallu // I+BTB cache invalidate + tlbi vmalle1is // invalidate I + D TLBs + dsb ish + + mov x0, #3 << 20 + msr cpacr_el1, x0 // Enable FP/ASIMD + msr mdscr_el1, xzr // Reset mdscr_el1 + /* + * Memory region attributes for LPAE: + * + * n = AttrIndx[2:0] + * n MAIR + * DEVICE_nGnRnE 000 00000000 + * DEVICE_nGnRE 001 00000100 + * DEVICE_GRE 010 00001100 + * NORMAL_NC 011 01000100 + * NORMAL 100 11111111 + */ + ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \ + MAIR(0x04, MT_DEVICE_nGnRE) | \ + MAIR(0x0c, MT_DEVICE_GRE) | \ + MAIR(0x44, MT_NORMAL_NC) | \ + MAIR(0xff, MT_NORMAL) + msr mair_el1, x5 + /* + * Prepare SCTLR + */ + adr x5, crval + ldp w5, w6, [x5] + mrs x0, sctlr_el1 + bic x0, x0, x5 // clear bits + orr x0, x0, x6 // set bits + /* + * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for + * both user and kernel. + */ + ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 + tcr_set_idmap_t0sz x10, x9 + + /* + * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in + * TCR_EL1. + */ + mrs x9, ID_AA64MMFR0_EL1 + bfi x10, x9, #32, #3 + msr tcr_el1, x10 + ret // return to head.S +ENDPROC(__cpu_setup) + + /* + * We set the desired value explicitly, including those of the + * reserved bits. The values of bits EE & E0E were set early in + * el2_setup, which are left untouched below. + * + * n n T + * U E WT T UD US IHBS + * CE0 XWHW CZ ME TEEA S + * .... .IEE .... 
NEAI TE.I ..AD DEN0 ACAM + * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved + * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings + */ + .type crval, #object +crval: + .word 0xfcffffff // clear + .word 0x34d5d91d // set