Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S        75
-rw-r--r--  arch/arm64/mm/dma-mapping.c  92
-rw-r--r--  arch/arm64/mm/fault.c        14
-rw-r--r--  arch/arm64/mm/flush.c         1
-rw-r--r--  arch/arm64/mm/hugetlbpage.c   7
-rw-r--r--  arch/arm64/mm/mmu.c          68
-rw-r--r--  arch/arm64/mm/proc.S         46
7 files changed, 170 insertions, 133 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e15..bdeb5d38c 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -22,84 +22,11 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include "proc-macros.S"
/*
- * __flush_dcache_all()
- *
- * Flush the whole D-cache.
- *
- * Corrupted registers: x0-x7, x9-x11
- */
-__flush_dcache_all:
- dmb sy // ensure ordering with previous memory accesses
- mrs x0, clidr_el1 // read clidr
- and x3, x0, #0x7000000 // extract loc from clidr
- lsr x3, x3, #23 // left align loc bit field
- cbz x3, finished // if loc is 0, then no need to clean
- mov x10, #0 // start clean at cache level 0
-loop1:
- add x2, x10, x10, lsr #1 // work out 3x current cache level
- lsr x1, x0, x2 // extract cache type bits from clidr
- and x1, x1, #7 // mask of the bits for current cache only
- cmp x1, #2 // see what cache we have at this level
- b.lt skip // skip if no cache, or just i-cache
- save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
- msr csselr_el1, x10 // select current cache level in csselr
- isb // isb to sych the new cssr&csidr
- mrs x1, ccsidr_el1 // read the new ccsidr
- restore_irqs x9
- and x2, x1, #7 // extract the length of the cache lines
- add x2, x2, #4 // add 4 (line length offset)
- mov x4, #0x3ff
- and x4, x4, x1, lsr #3 // find maximum number on the way size
- clz w5, w4 // find bit position of way size increment
- mov x7, #0x7fff
- and x7, x7, x1, lsr #13 // extract max number of the index size
-loop2:
- mov x9, x4 // create working copy of max way size
-loop3:
- lsl x6, x9, x5
- orr x11, x10, x6 // factor way and cache number into x11
- lsl x6, x7, x2
- orr x11, x11, x6 // factor index number into x11
- dc cisw, x11 // clean & invalidate by set/way
- subs x9, x9, #1 // decrement the way
- b.ge loop3
- subs x7, x7, #1 // decrement the index
- b.ge loop2
-skip:
- add x10, x10, #2 // increment cache number
- cmp x3, x10
- b.gt loop1
-finished:
- mov x10, #0 // swith back to cache level 0
- msr csselr_el1, x10 // select current cache level in csselr
- dsb sy
- isb
- ret
-ENDPROC(__flush_dcache_all)
-
-/*
- * flush_cache_all()
- *
- * Flush the entire cache system. The data cache flush is now achieved
- * using atomic clean / invalidates working outwards from L1 cache. This
- * is done using Set/Way based cache maintainance instructions. The
- * instruction cache can still be invalidated back to the point of
- * unification in a single instruction.
- */
-ENTRY(flush_cache_all)
- mov x12, lr
- bl __flush_dcache_all
- mov x0, #0
- ic ialluis // I+BTB cache invalidate
- ret x12
-ENDPROC(flush_cache_all)
-
-/*
* flush_icache_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
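
flush_cache_all() and __flush_dcache_all() worked by set/way, which only operates on the caches of the CPU executing the instructions and is therefore not a reliable way to make data visible to other observers; callers are expected to maintain caches by virtual address instead. A minimal sketch of the by-VA pattern, using the existing __flush_dcache_area() helper (the wrapper function below is illustrative, not part of this patch):

#include <asm/cacheflush.h>

/* Clean and invalidate one buffer to the point of coherency, by VA. */
static void publish_buffer(void *buf, size_t len)
{
	__flush_dcache_area(buf, len);
}
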
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b0bd4e5fd..d16a1cead 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -414,6 +414,98 @@ out:
return -ENOMEM;
}
+/********************************************
+ * The following APIs are for dummy DMA ops *
+ ********************************************/
+
+static void *__dummy_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return NULL;
+}
+
+static void __dummy_free(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ return -ENXIO;
+}
+
+static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return DMA_ERROR_CODE;
+}
+
+static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return 0;
+}
+
+static void __dummy_unmap_sg(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+}
+
+static void __dummy_sync_single(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void __dummy_sync_sg(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+}
+
+static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+ return 1;
+}
+
+static int __dummy_dma_supported(struct device *hwdev, u64 mask)
+{
+ return 0;
+}
+
+struct dma_map_ops dummy_dma_ops = {
+ .alloc = __dummy_alloc,
+ .free = __dummy_free,
+ .mmap = __dummy_mmap,
+ .map_page = __dummy_map_page,
+ .unmap_page = __dummy_unmap_page,
+ .map_sg = __dummy_map_sg,
+ .unmap_sg = __dummy_unmap_sg,
+ .sync_single_for_cpu = __dummy_sync_single,
+ .sync_single_for_device = __dummy_sync_single,
+ .sync_sg_for_cpu = __dummy_sync_sg,
+ .sync_sg_for_device = __dummy_sync_sg,
+ .mapping_error = __dummy_mapping_error,
+ .dma_supported = __dummy_dma_supported,
+};
+EXPORT_SYMBOL(dummy_dma_ops);
+
static int __init arm64_dma_init(void)
{
int ret;
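
The dummy ops give a DMA-incapable device a complete dma_map_ops table whose operations all fail or do nothing, so a caller sees an ordinary mapping error instead of a bogus bus address. An illustrative caller (the foo_* name and buffer are hypothetical, not from this patch):

#include <linux/dma-mapping.h>

static int foo_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* With dummy_dma_ops installed, __dummy_map_page() returned
	 * DMA_ERROR_CODE and __dummy_mapping_error() reports it here. */
	if (dma_mapping_error(dev, dma))
		return -EIO;

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
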
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 96da13167..94d98cd1a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -115,8 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
- if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
- printk_ratelimit()) {
+ if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
addr, esr);
@@ -211,7 +210,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* If we're in an interrupt or have no user context, we must not take
* the fault.
*/
- if (in_atomic() || !mm)
+ if (faulthandler_disabled() || !mm)
goto no_context;
if (user_mode(regs))
@@ -478,12 +477,19 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs)
{
struct siginfo info;
+ struct task_struct *tsk = current;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
+ pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
+ tsk->comm, task_pid_nr(tsk),
+ esr_get_class_string(esr), (void *)regs->pc,
+ (void *)regs->sp);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)addr;
- arm64_notify_die("", regs, &info, esr);
+ arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
static struct fault_info debug_fault_info[] = {
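
show_unhandled_signals_ratelimited() folds the old show_unhandled_signals && printk_ratelimit() pair into one helper with its own ratelimit state, so these messages no longer compete with every other printk_ratelimit() user. The helper is defined in a header rather than in this file; its rough shape, reconstructed here as a sketch (not part of this diff), is:

#include <linux/ratelimit.h>

#define show_unhandled_signals_ratelimited()				\
({									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	show_unhandled_signals && __ratelimit(&_rs);			\
})
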
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index b6f14e8d2..4dfa3975c 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0eeb4f093..831ec534d 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -31,13 +31,6 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
-#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-#endif
-
int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5b8b66442..a4ede4e2d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
@@ -116,7 +117,7 @@ void split_pud(pud_t *old_pud, pmd_t *pmd)
int i = 0;
do {
- set_pmd(pmd, __pmd(addr | prot));
+ set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
addr += PMD_SIZE;
} while (pmd++, i++, i < PTRS_PER_PMD);
}
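
The split_pud() fix matters because pgprot_t can be a distinct wrapper type, in which case only pgprot_val() yields raw bits that may be OR-ed into a block entry. For background, the relevant definitions look roughly like the following (the STRICT_MM_TYPECHECKS variants, shown here as a sketch, not part of this patch):

typedef u64 pteval_t;
typedef u64 pmdval_t;

typedef struct { pmdval_t pmd; } pmd_t;
typedef struct { pteval_t pgprot; } pgprot_t;

#define pgprot_val(x)	((x).pgprot)
#define __pmd(x)	((pmd_t) { (x) })
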
@@ -643,3 +644,68 @@ void __set_fixmap(enum fixed_addresses idx,
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+ int granularity, size, offset;
+ void *dt_virt;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+ * at least 8 bytes so that we can always access the size field of the
+ * FDT header after mapping the first chunk, double check here if that
+ * is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+ return NULL;
+
+ /*
+ * Make sure that the FDT region can be mapped without the need to
+ * allocate additional translation table pages, so that it is safe
+ * to call create_mapping() this early.
+ *
+ * On 64k pages, the FDT will be mapped using PTEs, so we need to
+ * be in the same PMD as the rest of the fixmap.
+ * On 4k pages, we'll use section mappings for the FDT so we only
+ * have to be in the same PUD.
+ */
+ BUILD_BUG_ON(dt_virt_base % SZ_2M);
+
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
+
+ granularity = PAGE_SIZE;
+ } else {
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
+
+ granularity = PMD_SIZE;
+ }
+
+ offset = dt_phys % granularity;
+ dt_virt = (void *)dt_virt_base + offset;
+
+ /* map the first chunk so we can read the size from the header */
+ create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+ granularity, prot);
+
+ if (fdt_check_header(dt_virt) != 0)
+ return NULL;
+
+ size = fdt_totalsize(dt_virt);
+ if (size > MAX_FDT_SIZE)
+ return NULL;
+
+ if (offset + size > granularity)
+ create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+ round_up(offset + size, granularity), prot);
+
+ memblock_reserve(dt_phys, size);
+
+ return dt_virt;
+}
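
fixmap_remap_fdt() maps the blob in granularity-sized pieces so that no extra page-table pages have to be allocated this early in boot. A worked example with an invented address, assuming 4k pages (so granularity = PMD_SIZE = 2 MiB):

/* Illustrative arithmetic only; the physical address is made up.
 *
 *   dt_phys        = 0x8f7f2000
 *   offset         = dt_phys % SZ_2M  ->  0x1f2000
 *   dt_virt        = dt_virt_base + 0x1f2000
 *   first mapping  : phys 0x8f600000 -> dt_virt_base, length SZ_2M
 *
 * If offset + fdt_totalsize(dt_virt) exceeds SZ_2M, the second
 * create_mapping() call maps round_up(offset + size, SZ_2M) bytes,
 * i.e. one more 2 MiB block, so the tail of the blob is covered too.
 */
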
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e19..39139a3aa 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,52 +46,6 @@
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
- * cpu_cache_off()
- *
- * Turn the CPU D-cache off.
- */
-ENTRY(cpu_cache_off)
- mrs x0, sctlr_el1
- bic x0, x0, #1 << 2 // clear SCTLR.C
- msr sctlr_el1, x0
- isb
- ret
-ENDPROC(cpu_cache_off)
-
-/*
- * cpu_reset(loc)
- *
- * Perform a soft reset of the system. Put the CPU into the same state
- * as it would be if it had been reset, and branch to what would be the
- * reset vector. It must be executed with the flat identity mapping.
- *
- * - loc - location to jump to for soft reset
- */
- .align 5
-ENTRY(cpu_reset)
- mrs x1, sctlr_el1
- bic x1, x1, #1
- msr sctlr_el1, x1 // disable the MMU
- isb
- ret x0
-ENDPROC(cpu_reset)
-
-ENTRY(cpu_soft_restart)
- /* Save address of cpu_reset() and reset address */
- mov x19, x0
- mov x20, x1
-
- /* Turn D-cache off */
- bl cpu_cache_off
-
- /* Push out all dirty data, and ensure cache is empty */
- bl flush_cache_all
-
- mov x0, x20
- ret x19
-ENDPROC(cpu_soft_restart)
-
-/*
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).