author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
commit    | 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree      | fa581f6dc1c0596391690d1f67eceef3af8246dc /arch/arm/mm
parent    | d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/Kconfig          |  32
-rw-r--r-- | arch/arm/mm/cache-l2x0.c     |  33
-rw-r--r-- | arch/arm/mm/cache-uniphier.c |  13
-rw-r--r-- | arch/arm/mm/dma-mapping.c    |  11
-rw-r--r-- | arch/arm/mm/flush.c          |  17
-rw-r--r-- | arch/arm/mm/idmap.c          |   2
-rw-r--r-- | arch/arm/mm/init.c           |   5
-rw-r--r-- | arch/arm/mm/ioremap.c        |   9
-rw-r--r-- | arch/arm/mm/mmap.c           |   3
-rw-r--r-- | arch/arm/mm/mmu.c            | 132
-rw-r--r-- | arch/arm/mm/pageattr.c       |   3
-rw-r--r-- | arch/arm/mm/proc-mohawk.S    |   2
-rw-r--r-- | arch/arm/mm/proc-v7.S        |  23
-rw-r--r-- | arch/arm/mm/proc-v7m.S       |  14
14 files changed, 179 insertions, 120 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 41218867a..549f6d3ae 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -21,7 +21,7 @@ config CPU_ARM7TDMI
 
 # ARM720T
 config CPU_ARM720T
-	bool "Support ARM720T processor" if (ARCH_MULTI_V4T && ARCH_INTEGRATOR)
+	bool
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
 	select CPU_CACHE_V4
@@ -39,7 +39,7 @@ config CPU_ARM720T
 
 # ARM740T
 config CPU_ARM740T
-	bool "Support ARM740T processor" if (ARCH_MULTI_V4T && ARCH_INTEGRATOR)
+	bool
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
@@ -71,7 +71,7 @@ config CPU_ARM9TDMI
 
 # ARM920T
 config CPU_ARM920T
-	bool "Support ARM920T processor" if (ARCH_MULTI_V4T && ARCH_INTEGRATOR)
+	bool
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
@@ -89,7 +89,7 @@ config CPU_ARM920T
 
 # ARM922T
 config CPU_ARM922T
-	bool "Support ARM922T processor" if (ARCH_MULTI_V4T && ARCH_INTEGRATOR)
+	bool
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
@@ -108,7 +108,7 @@ config CPU_ARM922T
 
 # ARM925T
 config CPU_ARM925T
-	bool "Support ARM925T processor" if ARCH_OMAP1
+	bool
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
@@ -127,7 +127,7 @@ config CPU_ARM925T
 
 # ARM926T
 config CPU_ARM926T
-	bool "Support ARM926T processor" if (!ARCH_MULTIPLATFORM || ARCH_MULTI_V5) && (ARCH_INTEGRATOR || MACH_REALVIEW_EB)
+	bool
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_CACHE_VIVT
@@ -163,7 +163,7 @@ config CPU_FA526
 
 # ARM940T
 config CPU_ARM940T
-	bool "Support ARM940T processor" if (ARCH_MULTI_V4T && ARCH_INTEGRATOR)
+	bool
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_NOMMU
@@ -181,7 +181,7 @@ config CPU_ARM940T
 
 # ARM946E-S
 config CPU_ARM946E
-	bool "Support ARM946E-S processor" if (ARCH_MULTI_V5 && ARCH_INTEGRATOR)
+	bool
 	depends on !MMU
 	select CPU_32v5
 	select CPU_ABRT_NOMMU
@@ -198,7 +198,7 @@ config CPU_ARM946E
 
 # ARM1020 - needs validating
 config CPU_ARM1020
-	bool "Support ARM1020T (rev 0) processor" if (ARCH_MULTI_V5 && ARCH_INTEGRATOR)
+	bool
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
@@ -216,7 +216,7 @@ config CPU_ARM1020
 
 # ARM1020E - needs validating
 config CPU_ARM1020E
-	bool "Support ARM1020E processor" if (ARCH_MULTI_V5 && ARCH_INTEGRATOR)
+	bool
 	depends on n
 	select CPU_32v5
 	select CPU_ABRT_EV4T
@@ -229,7 +229,7 @@ config CPU_ARM1020E
 
 # ARM1022E
 config CPU_ARM1022
-	bool "Support ARM1022E processor" if (ARCH_MULTI_V5 && ARCH_INTEGRATOR)
+	bool
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_VIVT
@@ -247,7 +247,7 @@ config CPU_ARM1022
 
 # ARM1026EJ-S
 config CPU_ARM1026
-	bool "Support ARM1026EJ-S processor" if (ARCH_MULTI_V5 && ARCH_INTEGRATOR)
+	bool
 	select CPU_32v5
 	select CPU_ABRT_EV5T	# But need Jazelle, but EV5TJ ignores bit 10
 	select CPU_CACHE_VIVT
@@ -358,7 +358,7 @@ config CPU_PJ4B
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if (!ARCH_MULTIPLATFORM || ARCH_MULTI_V6) && (ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX)
+	bool
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_CACHE_V6
@@ -371,7 +371,7 @@ config CPU_V6
 
 # ARMv6k
 config CPU_V6K
-	bool "Support ARM V6K processor" if (!ARCH_MULTIPLATFORM || ARCH_MULTI_V6) && (ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX)
+	bool
 	select CPU_32v6
 	select CPU_32v6K
 	select CPU_ABRT_EV6
@@ -385,7 +385,7 @@ config CPU_V6K
 
 # ARMv7
 config CPU_V7
-	bool "Support ARM V7 processor" if (!ARCH_MULTIPLATFORM || ARCH_MULTI_V7) && (ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX)
+	bool
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
@@ -1005,8 +1005,6 @@ config ARM_L1_CACHE_SHIFT
 config ARM_DMA_MEM_BUFFERABLE
 	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
-	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
-		     MACH_REALVIEW_PB11MP)
 	default y if CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 493692d83..9f9d54271 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -790,7 +790,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
 };
 
 static int __init __l2c_init(const struct l2c_init_data *data,
-			     u32 aux_val, u32 aux_mask, u32 cache_id)
+			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
 {
 	struct outer_cache_fns fns;
 	unsigned way_size_bits, ways;
@@ -866,6 +866,10 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 	fns.configure = outer_cache.configure;
 	if (data->fixup)
 		data->fixup(l2x0_base, cache_id, &fns);
+	if (nosync) {
+		pr_info("L2C: disabling outer sync\n");
+		fns.sync = NULL;
+	}
 
 	/*
 	 * Check if l2x0 controller is already enabled. If we are booting
@@ -925,7 +929,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	if (data->save)
 		data->save(l2x0_base);
 
-	__l2c_init(data, aux_val, aux_mask, cache_id);
+	__l2c_init(data, aux_val, aux_mask, cache_id, false);
 }
 
 #ifdef CONFIG_OF
@@ -1060,6 +1064,18 @@ static void __init l2x0_of_parse(const struct device_node *np,
 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 	}
 
+	if (of_property_read_bool(np, "arm,parity-enable")) {
+		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+		val |= L2C_AUX_CTRL_PARITY_ENABLE;
+	} else if (of_property_read_bool(np, "arm,parity-disable")) {
+		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+	}
+
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
 	if (ret)
 		return;
@@ -1176,6 +1192,14 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
 	}
 
+	if (of_property_read_bool(np, "arm,parity-enable")) {
+		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
+		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+	} else if (of_property_read_bool(np, "arm,parity-disable")) {
+		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
@@ -1704,6 +1728,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	struct resource res;
 	u32 cache_id, old_aux;
 	u32 cache_level = 2;
+	bool nosync = false;
 
 	np = of_find_matching_node(NULL, l2x0_ids);
 	if (!np)
@@ -1742,6 +1767,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	if (cache_level != 2)
 		pr_err("L2C: device tree specifies invalid cache level\n");
 
+	nosync = of_property_read_bool(np, "arm,outer-sync-disable");
+
 	/* Read back current (default) hardware configuration */
 	if (data->save)
 		data->save(l2x0_base);
@@ -1756,6 +1783,6 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	else
 		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 
-	return __l2c_init(data, aux_val, aux_mask, cache_id);
+	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
 }
 #endif
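A note on why clearing fns.sync is enough to honour "arm,outer-sync-disable": callers of the outer-cache sync hook already tolerate a missing callback. A minimal sketch of that guard, modeled on the outer_sync() helper in arch/arm/include/asm/outercache.h (illustrative, trimmed to the relevant member):

	struct outer_cache_fns {
		void (*sync)(void);
		/* ... remaining callbacks elided ... */
	};

	extern struct outer_cache_fns outer_cache;

	/* Drain the write buffer of the outer cache, if one is driven.
	 * With the sync callback left NULL, this becomes a no-op. */
	static inline void outer_sync(void)
	{
		if (outer_cache.sync)
			outer_cache.sync();
	}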
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index 0502ba17a..a6fa7b73f 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -377,17 +377,6 @@ static const struct of_device_id uniphier_cache_match[] __initconst = {
 	{ /* sentinel */ }
 };
 
-static struct device_node * __init uniphier_cache_get_next_level_node(
-						struct device_node *np)
-{
-	u32 phandle;
-
-	if (of_property_read_u32(np, "next-level-cache", &phandle))
-		return NULL;
-
-	return of_find_node_by_phandle(phandle);
-}
-
 static int __init __uniphier_cache_init(struct device_node *np,
 					unsigned int *cache_level)
 {
@@ -491,7 +480,7 @@ static int __init __uniphier_cache_init(struct device_node *np,
 	 * next level cache fails because we want to continue with available
 	 * cache levels.
 	 */
-	next_np = uniphier_cache_get_next_level_node(np);
+	next_np = of_find_next_cache_node(np);
 	if (next_np) {
 		(*cache_level)++;
 		ret = __uniphier_cache_init(next_np, cache_level);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 534a60ae2..0eca38125 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1200,10 +1200,7 @@ error:
 	while (i--)
 		if (pages[i])
 			__free_pages(pages[i], 0);
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
 	return NULL;
 }
 
@@ -1211,7 +1208,6 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
 	int i;
 
 	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
@@ -1222,10 +1218,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 			__free_pages(pages[i], 0);
 	}
 
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
 	return 0;
 }
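The kvfree() conversion above is safe because the pages array is obtained from kzalloc() for small sizes and vzalloc() otherwise, and kvfree() picks the matching free routine by checking whether the pointer lies in vmalloc space. A sketch of the pairing (the allocation branch mirrors the one earlier in __iommu_alloc_buffer):

	/* Allocation side: slab for small arrays, vmalloc for large ones. */
	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);

	/* Free side: one call covers both cases, replacing the old
	 * size-based kfree()/vfree() branch. */
	kvfree(pages);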
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1ec8e7590..d0ba3551d 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -330,7 +330,7 @@ void flush_dcache_page(struct page *page)
 	mapping = page_mapping(page);
 
 	if (!cache_ops_need_broadcast() &&
-	    mapping && !page_mapped(page))
+	    mapping && !page_mapcount(page))
 		clear_bit(PG_dcache_clean, &page->flags);
 	else {
 		__flush_dcache_page(mapping, page);
@@ -415,18 +415,3 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 */
 	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp)
-{
-	pmd_t pmd = pmd_mksplitting(*pmdp);
-	VM_BUG_ON(address & ~PMD_MASK);
-	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-
-	/* dummy IPI to serialise against fast_gup */
-	kick_all_cpus_sync();
-}
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index e7a81cebb..d65909697 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -86,7 +86,7 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
 
 	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
+	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family())
 		prot |= PMD_BIT4;
 
 	pgd += pgd_index(addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7f8cd1b35..49bd08178 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -192,7 +192,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(__pfn_to_phys(pfn));
+	return memblock_is_map_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
@@ -433,6 +433,9 @@ static void __init free_highpages(void)
 		if (end <= max_low)
 			continue;
 
+		if (memblock_is_nomap(mem))
+			continue;
+
 		/* Truncate partial highmem entries */
 		if (start < max_low)
 			start = max_low;
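The memblock_is_map_memory() and memblock_is_nomap() checks above make pfn_valid() and free_highpages() skip regions carrying the MEMBLOCK_NOMAP attribute, i.e. memory the kernel must not map linearly. A hypothetical sketch of how a platform would flag such a region (memblock_mark_nomap() is the real helper; the address range is made up):

	#include <linux/memblock.h>
	#include <linux/sizes.h>

	/* Hypothetical example: keep a firmware-owned range out of the
	 * linear mapping. Afterwards pfn_valid() returns false for these
	 * frames and free_highpages() leaves them alone. */
	static void __init reserve_firmware_window(void)
	{
		phys_addr_t base = 0x80000000;	/* made-up base address */
		phys_addr_t size = SZ_1M;	/* made-up size */

		memblock_mark_nomap(base, size);
	}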
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0c81056c1..66a978d05 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -30,6 +30,7 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
+#include <asm/early_ioremap.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -469,3 +470,11 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 }
 EXPORT_SYMBOL_GPL(pci_ioremap_io);
 #endif
+
+/*
+ * Must be called after early_fixmap_init
+ */
+void __init early_ioremap_init(void)
+{
+	early_ioremap_setup();
+}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 407dc7865..66353caa3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -173,8 +173,7 @@ unsigned long arch_mmap_rnd(void)
 {
 	unsigned long rnd;
 
-	/* 8 bits of randomness in 20 address space bits */
-	rnd = (unsigned long)get_random_int() % (1 << 8);
+	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 
 	return rnd << PAGE_SHIFT;
 }
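On the mmap.c change: masking with (1UL << mmap_rnd_bits) - 1 keeps the low mmap_rnd_bits bits of the random value, so the amount of mmap entropy becomes configurable instead of hard-coded, and the PAGE_SHIFT shift turns that page count into an address offset. A small stand-alone illustration of the arithmetic (user-space C, made-up values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mmap_rnd_bits = 8;	/* 8 matches the old hard-coded value */
		unsigned long page_shift = 12;		/* 4 KiB pages */
		unsigned long random = 0xdeadbeefUL;	/* stand-in for get_random_long() */

		unsigned long rnd = random & ((1UL << mmap_rnd_bits) - 1);

		/* 0xdeadbeef & 0xff = 0xef, so offset = 0xef000 (at most 255 pages) */
		printf("offset = %#lx\n", rnd << page_shift);
		return 0;
	}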
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4867f5daf..434d76f0b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -390,7 +390,7 @@ void __init early_fixmap_init(void)
 	 * The early fixmap range spans multiple pmds, for which
 	 * we are not prepared:
 	 */
-	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+	BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
 		     != FIXADDR_TOP >> PMD_SHIFT);
 
 	pmd = fixmap_pmd(FIXADDR_TOP);
@@ -477,7 +477,7 @@ static void __init build_mem_type_table(void)
 	 * "update-able on write" bit on ARM610).  However, Xscale and
 	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale() || cpu_is_xsc3()) {
+	if (cpu_is_xscale_family()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
 	 * in the Short-descriptor translation table format descriptors.
 	 */
 	if (cpu_arch == CPU_ARCH_ARMv7 &&
-	    (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+	    (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
 		user_pmd_table |= PMD_PXNTABLE;
 	}
 #endif
@@ -724,30 +724,49 @@ static void __init *early_alloc(unsigned long sz)
 	return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static void *__init late_alloc(unsigned long sz)
+{
+	void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
+
+	BUG_ON(!ptr);
+	return ptr;
+}
+
+static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr,
+				unsigned long prot,
+				void *(*alloc)(unsigned long sz))
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+		pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
 }
 
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
+				      unsigned long prot)
+{
+	return pte_alloc(pmd, addr, prot, early_alloc);
+}
+
 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
-				  const struct mem_type *type)
+				  const struct mem_type *type,
+				  void *(*alloc)(unsigned long sz),
+				  bool ng)
 {
-	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+	pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc);
 	do {
-		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+			    ng ? PTE_EXT_NG : 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 			unsigned long end, phys_addr_t phys,
-			const struct mem_type *type)
+			const struct mem_type *type, bool ng)
 {
 	pmd_t *p = pmd;
 
@@ -765,7 +784,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 		pmd++;
 #endif
 	do {
-		*pmd = __pmd(phys | type->prot_sect);
+		*pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
 		phys += SECTION_SIZE;
 	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
@@ -774,7 +793,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+				      const struct mem_type *type,
+				      void *(*alloc)(unsigned long sz), bool ng)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
 	unsigned long next;
@@ -792,10 +812,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		 */
 		if (type->prot_sect &&
 				((addr | next | phys) & ~SECTION_MASK) == 0) {
-			__map_init_section(pmd, addr, next, phys, type);
+			__map_init_section(pmd, addr, next, phys, type, ng);
 		} else {
 			alloc_init_pte(pmd, addr, next,
-						__phys_to_pfn(phys), type);
+				       __phys_to_pfn(phys), type, alloc, ng);
 		}
 
 		phys += next - addr;
@@ -805,21 +825,24 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 				  unsigned long end, phys_addr_t phys,
-				  const struct mem_type *type)
+				  const struct mem_type *type,
+				  void *(*alloc)(unsigned long sz), bool ng)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
 
 #ifndef CONFIG_ARM_LPAE
-static void __init create_36bit_mapping(struct map_desc *md,
-	const struct mem_type *type)
+static void __init create_36bit_mapping(struct mm_struct *mm,
+					struct map_desc *md,
+					const struct mem_type *type,
+					bool ng)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
@@ -859,7 +882,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	 */
 	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
 
-	pgd = pgd_offset_k(addr);
+	pgd = pgd_offset(mm, addr);
 	end = addr + length;
 	do {
 		pud_t *pud = pud_offset(pgd, addr);
@@ -867,7 +890,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
 		int i;
 
 		for (i = 0; i < 16; i++)
-			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
+				       (ng ? PMD_SECT_nG : 0));
 
 		addr += SUPERSECTION_SIZE;
 		phys += SUPERSECTION_SIZE;
@@ -876,33 +900,15 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	}
 }
 #endif	/* !CONFIG_ARM_LPAE */
 
-/*
- * Create the page directory entries and any necessary
- * page tables for the mapping specified by `md'.  We
- * are able to cope here with varying sizes and address
- * offsets, and we take full advantage of sections and
- * supersections.
- */
-static void __init create_mapping(struct map_desc *md)
+static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
+				    void *(*alloc)(unsigned long sz),
+				    bool ng)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
 	const struct mem_type *type;
 	pgd_t *pgd;
 
-	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
-			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
-		return;
-	}
-
-	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
-	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
-			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
-	}
-
 	type = &mem_types[md->type];
 
 #ifndef CONFIG_ARM_LPAE
@@ -910,7 +916,7 @@ static void __init create_mapping(struct map_desc *md)
 	 * Catch 36-bit addresses
 	 */
 	if (md->pfn >= 0x100000) {
-		create_36bit_mapping(md, type);
+		create_36bit_mapping(mm, md, type, ng);
 		return;
 	}
 #endif
@@ -925,12 +931,12 @@ static void __init create_mapping(struct map_desc *md)
 		return;
 	}
 
-	pgd = pgd_offset_k(addr);
+	pgd = pgd_offset(mm, addr);
 	end = addr + length;
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
 
-		alloc_init_pud(pgd, addr, next, phys, type);
+		alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
 
 		phys += next - addr;
 		addr = next;
@@ -938,6 +944,43 @@ static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
 }
 
 /*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'.  We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections and
+ * supersections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		return;
+	}
+
+	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+	}
+
+	__create_mapping(&init_mm, md, early_alloc, false);
+}
+
+void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
+				bool ng)
+{
+#ifdef CONFIG_ARM_LPAE
+	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+	if (WARN_ON(!pud))
+		return;
+	pmd_alloc(mm, pud, 0);
+#endif
+	__create_mapping(mm, md, late_alloc, ng);
+}
+
+/*
  * Create the architecture specific mappings
  */
 void __init iotable_init(struct map_desc *io_desc, int nr)
@@ -1392,6 +1435,9 @@ static void __init map_lowmem(void)
 		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
+		if (memblock_is_nomap(reg))
+			continue;
+
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
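The mmu.c refactoring threads an allocation callback and an nG (not-global) flag through the whole table-walking chain, so one walker serves both the boot path (early_alloc, memblock-backed) and the new create_mapping_late() path (late_alloc, page-allocator-backed). A stripped-down illustration of the callback pattern (names and bookkeeping invented for the example):

	#include <stdlib.h>
	#include <string.h>

	typedef void *(*pt_alloc_fn)(unsigned long sz);

	/* Stand-in for early_alloc(): at boot the kernel pulls from memblock. */
	static void *boot_alloc(unsigned long sz)
	{
		void *p = malloc(sz);

		if (p)
			memset(p, 0, sz);
		return p;
	}

	/* Stand-in for late_alloc(): used once the page allocator is up. */
	static void *late_alloc_demo(unsigned long sz)
	{
		return calloc(1, sz);
	}

	/* One walker, parameterised by allocation policy, just as
	 * pte_alloc() is parameterised above. */
	static void build_table(pt_alloc_fn alloc)
	{
		void *pte = alloc(4096);

		/* ... populate entries here ... */
		free(pte);
	}

	int main(void)
	{
		build_table(boot_alloc);	/* create_mapping() path */
		build_table(late_alloc_demo);	/* create_mapping_late() path */
		return 0;
	}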
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index cf30daff8..d19b1ad29 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,6 +49,9 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
+	if (!numpages)
+		return 0;
+
 	if (start < MODULES_VADDR || start >= MODULES_END)
 		return -EINVAL;
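With the zero-length guard in pageattr.c, an empty range is now a successful no-op instead of falling through to the MODULES_VADDR bounds check (which would return -EINVAL for addresses outside the module area). A hedged usage sketch (set_memory_ro() is the ARM caller-facing wrapper around change_memory_common(); the surrounding function is invented):

	#include <asm/cacheflush.h>	/* set_memory_ro() and friends on ARM */

	static int protect_pages(unsigned long addr, int numpages)
	{
		/* numpages == 0 now returns 0 immediately, so callers need
		 * no special case for empty ranges. */
		return set_memory_ro(addr, numpages);
	}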
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index d65edf717..6f07d2ef4 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -342,11 +342,13 @@ ENTRY(cpu_mohawk_switch_mm)
  */
 	.align	5
 ENTRY(cpu_mohawk_set_pte_ext)
+#ifdef CONFIG_MMU
 	armv3_set_pte_ext
 	mov	r0, r0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	ret	lr
+#endif
 
 	.globl	cpu_mohawk_suspend_size
 	.equ	cpu_mohawk_suspend_size, 4 * 6
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 8e1ea433c..0f92d575a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,10 +274,12 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
 	mov	r10, #0
-1:	adr	r12, __v7_setup_stack		@ the local stack
-	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
+1:	adr	r0, __v7_setup_stack_ptr
+	ldr	r12, [r0]
+	add	r12, r12, r0			@ the local stack
+	stmia	r12, {r1-r6, lr}		@ v7_invalidate_l1 touches r0-r6
 	bl      v7_invalidate_l1
-	ldmia	r12, {r0-r5, lr}
+	ldmia	r12, {r1-r6, lr}
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
 	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
@@ -415,10 +417,12 @@ __v7_pj4b_setup:
 #endif /* CONFIG_CPU_PJ4B */
 
 __v7_setup:
-	adr	r12, __v7_setup_stack		@ the local stack
-	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
+	adr	r0, __v7_setup_stack_ptr
+	ldr	r12, [r0]
+	add	r12, r12, r0			@ the local stack
+	stmia	r12, {r1-r6, lr}		@ v7_invalidate_l1 touches r0-r6
 	bl      v7_invalidate_l1
-	ldmia	r12, {r0-r5, lr}
+	ldmia	r12, {r1-r6, lr}
 
 __v7_setup_cont:
 	and	r0, r9, #0xff000000		@ ARM?
@@ -480,11 +484,16 @@ __errata_finish:
 	orr	r0, r0, r6			@ set them
  THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
 	ret	lr				@ return to head.S:__ret
+
+	.align	2
+__v7_setup_stack_ptr:
+	.word	__v7_setup_stack - .
 ENDPROC(__v7_setup)
 
+	.bss
 	.align	2
 __v7_setup_stack:
-	.space	4 * 7				@ 12 registers
+	.space	4 * 7				@ 7 registers
 
 	__INITDATA
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 67d920907..7229d8d0b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -12,6 +12,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/memory.h>
 #include <asm/v7m.h>
 
 #include "proc-macros.S"
@@ -97,19 +98,19 @@ __v7m_setup:
 	mov	r5, #0x00800000
 	str	r5, [r0, V7M_SCB_SHPR3]	@ set PendSV priority
 
-	@ SVC to run the kernel in this mode
+	@ SVC to switch to handler mode. Notice that this requires sp to
+	@ point to writeable memory because the processor saves
+	@ some registers to the stack.
 	badr	r1, 1f
 	ldr	r5, [r12, #11 * 4]	@ read the SVC vector entry
 	str	r1, [r12, #11 * 4]	@ write the temporary SVC vector entry
 	mov	r6, lr			@ save LR
-	mov	r7, sp			@ save SP
-	ldr	sp, =__v7m_setup_stack_top
+	ldr	sp, =init_thread_union + THREAD_START_SP
 	cpsie	i
 	svc	#0
1:	cpsid	i
 	str	r5, [r12, #11 * 4]	@ restore the original SVC vector entry
 	mov	lr, r6			@ restore LR
-	mov	sp, r7			@ restore SP
 
 	@ Special-purpose control register
 	mov	r1, #1
@@ -123,11 +124,6 @@ __v7m_setup:
 	ret	lr
 ENDPROC(__v7m_setup)
 
-	.align	2
-__v7m_setup_stack:
-	.space	4 * 8			@ 8 registers
-__v7m_setup_stack_top:
-
 define_processor_functions	v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
 
 	.section ".rodata"
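On the proc-v7.S change: `.word __v7_setup_stack - .` stores a PC-relative offset rather than an absolute address, and the adr/ldr/add sequence rebuilds the absolute address at run time, so the code works wherever the image executes (the stack itself also moves to .bss). A C analogue of the address arithmetic (illustrative only):

	#include <stdint.h>

	/* The anchor word holds (target - anchor). Adding the anchor's own
	 * run-time address back yields the target's run-time address,
	 * independent of where the image was loaded. */
	static inline uintptr_t resolve_pcrel(const int32_t *anchor)
	{
		return (uintptr_t)anchor + (intptr_t)*anchor;
	}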