path: root/arch/cris/arch-v32/mm
author    André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-08-05 17:04:01 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-08-05 17:04:01 -0300
commit    57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree      5e910f0e82173f4ef4f51111366a3f1299037a7b /arch/cris/arch-v32/mm
Initial import
Diffstat (limited to 'arch/cris/arch-v32/mm')
-rw-r--r--  arch/cris/arch-v32/mm/Makefile    4
-rw-r--r--  arch/cris/arch-v32/mm/init.c    162
-rw-r--r--  arch/cris/arch-v32/mm/intmem.c  150
-rw-r--r--  arch/cris/arch-v32/mm/l2cache.c  29
-rw-r--r--  arch/cris/arch-v32/mm/mmu.S     210
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c     207
6 files changed, 762 insertions, 0 deletions
diff --git a/arch/cris/arch-v32/mm/Makefile b/arch/cris/arch-v32/mm/Makefile
new file mode 100644
index 000000000..0b801f296
--- /dev/null
+++ b/arch/cris/arch-v32/mm/Makefile
@@ -0,0 +1,4 @@
+# Makefile for the Linux/cris parts of the memory manager.
+
+obj-y += mmu.o init.o tlb.o intmem.o
+obj-$(CONFIG_ETRAX_L2CACHE) += l2cache.o
diff --git a/arch/cris/arch-v32/mm/init.c b/arch/cris/arch-v32/mm/init.c
new file mode 100644
index 000000000..f5438ca81
--- /dev/null
+++ b/arch/cris/arch-v32/mm/init.c
@@ -0,0 +1,162 @@
+/*
+ * Set up paging and the MMU.
+ *
+ * Copyright (C) 2000-2003, Axis Communications AB.
+ *
+ * Authors: Bjorn Wesen <bjornw@axis.com>
+ * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
+ */
+#include <linux/mmzone.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/mmu.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <arch/hwregs/asm/mmu_defs_asm.h>
+#include <arch/hwregs/supp_reg.h>
+
+extern void tlb_init(void);
+
+/*
+ * The kernel is already mapped with a linear mapping at kseg_c, so there's no
+ * need to map it with a page table. However, head.S also temporarily mapped it
+ * at kseg_4, so the ksegs are set up again here. Also clear the TLB and do
+ * various other paging setup.
+ */
+void __init cris_mmu_init(void)
+{
+ unsigned long mmu_config;
+ unsigned long mmu_kbase_hi;
+ unsigned long mmu_kbase_lo;
+ unsigned short mmu_page_id;
+
+ /*
+ * Make sure the current pgd table points to something sane, even if it
+ * is most probably not used until the next switch_mm.
+ */
+ per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
+
+ /* Initialise the TLB. Function found in tlb.c. */
+ tlb_init();
+
+ /*
+ * Enable exceptions and initialize the kernel segments.
+ * See head.S for differences between ARTPEC-3 and ETRAX FS.
+ */
+ mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
+ REG_STATE(mmu, rw_mm_cfg, acc, on) |
+ REG_STATE(mmu, rw_mm_cfg, ex, on) |
+ REG_STATE(mmu, rw_mm_cfg, inv, on) |
+#ifdef CONFIG_CRIS_MACH_ARTPEC3
+ REG_STATE(mmu, rw_mm_cfg, seg_f, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_e, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_d, linear) |
+#else
+ REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
+ REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
+ REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
+#endif
+ REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
+ REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
+ REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
+ REG_STATE(mmu, rw_mm_cfg, seg_0, page));
+
+ /* See head.S for differences between ARTPEC-3 and ETRAX FS. */
+ mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
+#ifdef CONFIG_CRIS_MACH_ARTPEC3
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x5) |
+#else
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
+#endif
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
+
+ mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
+ REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));
+
+ mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);
+
+ /* Update the instruction MMU. */
+ SUPP_BANK_SEL(BANK_IM);
+ SUPP_REG_WR(RW_MM_CFG, mmu_config);
+ SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
+ SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
+ SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
+
+ /* Update the data MMU. */
+ SUPP_BANK_SEL(BANK_DM);
+ SUPP_REG_WR(RW_MM_CFG, mmu_config);
+ SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
+ SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
+ SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
+
+ SPEC_REG_WR(SPEC_REG_PID, 0);
+
+ /*
+ * The MMU has been enabled ever since head.S but just to make it
+ * totally obvious enable it here as well.
+ */
+ SUPP_BANK_SEL(BANK_GC);
+ SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
+}
+
+void __init paging_init(void)
+{
+ int i;
+ unsigned long zones_size[MAX_NR_ZONES];
+
+ printk("Setting up paging and the MMU.\n");
+
+ /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
+ for(i = 0; i < PTRS_PER_PGD; i++)
+ swapper_pg_dir[i] = __pgd(0);
+
+ cris_mmu_init();
+
+ /*
+ * Allocate and clear the zero page (empty_zero_page), which the kernel
+ * hands out e.g. for read faults on anonymous mappings.
+ */
+ empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+ /* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
+ zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+
+ for (i = 1; i < MAX_NR_ZONES; i++)
+ zones_size[i] = 0;
+
+ /*
+ * Use free_area_init_node instead of free_area_init, because it is
+ * designed for systems where the DRAM starts at an address
+ * substantially higher than 0, like us (we start at PAGE_OFFSET). This
+ * saves space in the mem_map page array.
+ */
+ free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
+
+ mem_map = contig_page_data.node_mem_map;
+}
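The kernel segment setup above is just a table of fixed linear mappings: every 256 MB kseg marked "linear" in rw_mm_cfg is wired to the physical base given by its base_x field in rw_mm_kbase_hi/lo. A minimal sketch of the resulting translation, assuming each base field holds the top physical-address nibble for its segment (as suggested by base_c = 0x4 placing the kernel's kseg_c over DRAM); the helper name is illustrative only, not part of the file above:

    /* Illustrative only: how a linearly mapped kseg address would translate,
     * assuming base_nibble is the rw_mm_kbase field programmed for that
     * 256 MB segment (e.g. base_c = 0x4 in cris_mmu_init() above). */
    static inline unsigned long kseg_linear_to_phys(unsigned long vaddr,
                                                    unsigned int base_nibble)
    {
            /* Keep the offset within the 256 MB segment and substitute the
             * physical top nibble configured for that segment. */
            return (vaddr & 0x0fffffffUL) | ((unsigned long)base_nibble << 28);
    }

Under this assumption, with base_c = 0x4 a kseg_c address such as 0xc0001000 would come out as physical 0x40001000.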
diff --git a/arch/cris/arch-v32/mm/intmem.c b/arch/cris/arch-v32/mm/intmem.c
new file mode 100644
index 000000000..1b17d92ce
--- /dev/null
+++ b/arch/cris/arch-v32/mm/intmem.c
@@ -0,0 +1,150 @@
+/*
+ * Simple allocator for internal RAM in ETRAX FS
+ *
+ * Copyright (c) 2004 Axis Communications AB.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <memmap.h>
+
+#define STATUS_FREE 0
+#define STATUS_ALLOCATED 1
+
+#ifdef CONFIG_ETRAX_L2CACHE
+#define RESERVED_SIZE 66*1024
+#else
+#define RESERVED_SIZE 0
+#endif
+
+struct intmem_allocation {
+ struct list_head entry;
+ unsigned int size;
+ unsigned offset;
+ char status;
+};
+
+
+static struct list_head intmem_allocations;
+static void* intmem_virtual;
+
+static void crisv32_intmem_init(void)
+{
+ static int initiated = 0;
+ if (!initiated) {
+ struct intmem_allocation* alloc;
+ alloc = kmalloc(sizeof *alloc, GFP_KERNEL);
+ INIT_LIST_HEAD(&intmem_allocations);
+ intmem_virtual = ioremap(MEM_INTMEM_START + RESERVED_SIZE,
+ MEM_INTMEM_SIZE - RESERVED_SIZE);
+ initiated = 1;
+ alloc->size = MEM_INTMEM_SIZE - RESERVED_SIZE;
+ alloc->offset = 0;
+ alloc->status = STATUS_FREE;
+ list_add_tail(&alloc->entry, &intmem_allocations);
+ }
+}
+
+void* crisv32_intmem_alloc(unsigned size, unsigned align)
+{
+ struct intmem_allocation* allocation;
+ struct intmem_allocation* tmp;
+ void* ret = NULL;
+
+ preempt_disable();
+ crisv32_intmem_init();
+
+ list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
+ int alignment = allocation->offset % align;
+ alignment = alignment ? align - alignment : alignment;
+
+ if (allocation->status == STATUS_FREE &&
+ allocation->size >= size + alignment) {
+ if (allocation->size > size + alignment) {
+ struct intmem_allocation* alloc;
+ alloc = kmalloc(sizeof *alloc, GFP_ATOMIC);
+ alloc->status = STATUS_FREE;
+ alloc->size = allocation->size - size -
+ alignment;
+ alloc->offset = allocation->offset + size +
+ alignment;
+ list_add(&alloc->entry, &allocation->entry);
+
+ if (alignment) {
+ struct intmem_allocation *tmp;
+ tmp = kmalloc(sizeof *tmp, GFP_ATOMIC);
+ tmp->offset = allocation->offset;
+ tmp->size = alignment;
+ tmp->status = STATUS_FREE;
+ allocation->offset += alignment;
+ list_add_tail(&tmp->entry,
+ &allocation->entry);
+ }
+ }
+ allocation->status = STATUS_ALLOCATED;
+ allocation->size = size;
+ ret = (void*)((int)intmem_virtual + allocation->offset);
+ break; /* first fit found; don't mark further free regions as allocated */
+ }
+ }
+ preempt_enable();
+ return ret;
+}
+
+void crisv32_intmem_free(void* addr)
+{
+ struct intmem_allocation* allocation;
+ struct intmem_allocation* tmp;
+
+ if (addr == NULL)
+ return;
+
+ preempt_disable();
+ crisv32_intmem_init();
+
+ list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
+ if (allocation->offset == (int)(addr - intmem_virtual)) {
+ struct intmem_allocation *prev =
+ list_entry(allocation->entry.prev,
+ struct intmem_allocation, entry);
+ struct intmem_allocation *next =
+ list_entry(allocation->entry.next,
+ struct intmem_allocation, entry);
+
+ allocation->status = STATUS_FREE;
+ /* Join with prev and/or next if also free */
+ if ((prev != &intmem_allocations) &&
+ (prev->status == STATUS_FREE)) {
+ prev->size += allocation->size;
+ list_del(&allocation->entry);
+ kfree(allocation);
+ allocation = prev;
+ }
+ if ((next != &intmem_allocations) &&
+ (next->status == STATUS_FREE)) {
+ allocation->size += next->size;
+ list_del(&next->entry);
+ kfree(next);
+ }
+ preempt_enable();
+ return;
+ }
+ }
+ preempt_enable();
+}
+
+void* crisv32_intmem_phys_to_virt(unsigned long addr)
+{
+ return (void *)(addr - (MEM_INTMEM_START + RESERVED_SIZE) +
+ (unsigned long)intmem_virtual);
+}
+
+unsigned long crisv32_intmem_virt_to_phys(void* addr)
+{
+ return (unsigned long)((unsigned long )addr -
+ (unsigned long)intmem_virtual + MEM_INTMEM_START +
+ RESERVED_SIZE);
+}
+
+module_init(crisv32_intmem_init);
+
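The allocator above keeps a single list of regions ordered by offset, serves requests first-fit (splitting off an alignment pad and any trailing remainder), and coalesces adjacent free regions again in crisv32_intmem_free(). A small usage sketch of that API, with a made-up caller and buffer purely for illustration:

    /* Hypothetical caller of the allocator API defined above. */
    static int example_use_intmem(void)
    {
            /* 1 KB, 32-byte-aligned region of internal RAM. */
            void *desc = crisv32_intmem_alloc(1024, 32);

            if (!desc)
                    return -ENOMEM;

            /* Hardware typically wants the physical address of the region. */
            printk("intmem buffer at phys 0x%lx\n",
                   crisv32_intmem_virt_to_phys(desc));

            crisv32_intmem_free(desc);
            return 0;
    }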
diff --git a/arch/cris/arch-v32/mm/l2cache.c b/arch/cris/arch-v32/mm/l2cache.c
new file mode 100644
index 000000000..332ff10dc
--- /dev/null
+++ b/arch/cris/arch-v32/mm/l2cache.c
@@ -0,0 +1,29 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <memmap.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/l2cache_defs.h>
+#include <asm/io.h>
+
+#define L2CACHE_SIZE 64
+
+int __init l2cache_init(void)
+{
+ reg_l2cache_rw_ctrl ctrl = {0};
+ reg_l2cache_rw_cfg cfg = {.en = regk_l2cache_yes};
+
+ ctrl.csize = L2CACHE_SIZE;
+ ctrl.cbase = L2CACHE_SIZE / 4 + (L2CACHE_SIZE % 4 ? 1 : 0);
+ REG_WR(l2cache, regi_l2cache, rw_ctrl, ctrl);
+
+ /* Flush the tag memory */
+ memset((void *)(MEM_INTMEM_START | MEM_NON_CACHEABLE), 0, 2*1024);
+
+ /* Enable the cache */
+ REG_WR(l2cache, regi_l2cache, rw_cfg, cfg);
+
+ return 0;
+}
+
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
new file mode 100644
index 000000000..c0981044e
--- /dev/null
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -0,0 +1,210 @@
+; WARNING : The refill handler has been modified, see below !!!
+
+/*
+ * Copyright (C) 2003 Axis Communications AB
+ *
+ * Authors: Mikael Starvik (starvik@axis.com)
+ *
+ * Code for the fault low-level handling routines.
+ *
+ */
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+; Save all registers. Must save them in the same order as struct pt_regs.
+.macro SAVE_ALL
+ subq 12, $sp
+ move $erp, [$sp]
+ subq 4, $sp
+ move $srp, [$sp]
+ subq 4, $sp
+ move $ccs, [$sp]
+ subq 4, $sp
+ move $spc, [$sp]
+ subq 4, $sp
+ move $mof, [$sp]
+ subq 4, $sp
+ move $srs, [$sp]
+ subq 4, $sp
+ move.d $acr, [$sp]
+ subq 14*4, $sp
+ movem $r13, [$sp]
+ subq 4, $sp
+ move.d $r10, [$sp]
+.endm
+
+; Bus fault handler. Extracts relevant information and calls mm subsystem
+; to handle the fault.
+.macro MMU_BUS_FAULT_HANDLER handler, mmu, we, ex
+ .globl \handler
+ .type \handler,"function"
+\handler:
+ SAVE_ALL
+ move \mmu, $srs ; Select MMU support register bank
+ move.d $sp, $r11 ; regs
+ moveq 1, $r12 ; protection fault
+ moveq \we, $r13 ; write exception?
+ orq \ex << 1, $r13 ; execute?
+ move $s3, $r10 ; rw_mm_cause
+ and.d ~8191, $r10 ; Get faulting page start address
+
+ jsr do_page_fault
+ nop
+ ba ret_from_intr
+ nop
+ .size \handler, . - \handler
+.endm
+
+; Refill handler. Three cases may occur:
+; 1. PMD and PTE exists in mm subsystem but not in TLB
+; 2. PMD exists but not PTE
+; 3. PMD doesn't exist
+; The code below handles case 1 and calls the mm subsystem for case 2 and 3.
+; Do not touch this code without very good reasons and extensive testing.
+; Note that the code is optimized to minimize stalls (makes the code harder
+; to read).
+;
+; WARNING !!!
+; Modified by Mikael Asker 060725: added a workaround for strange TLB
+; behavior. If the same PTE is present in more than one set, the TLB
+; doesn't recognize it and we get stuck in a loop of refill exceptions.
+; The workaround detects such loops and exits them by flushing
+; the TLB contents. The problem and workaround were verified
+; in VCS by Mikael Starvik.
+;
+; Each page is 8 KB. A PMD page holds 8192/4 = 2048 PTEs (each PTE is 4 bytes),
+; so each PMD maps 16 MB of virtual memory.
+; Bits 0-12 : Offset within a page
+; Bits 13-23 : PTE offset within a PMD
+; Bits 24-31 : PMD offset within the PGD
+
+.macro MMU_REFILL_HANDLER handler, mmu
+ .data
+1: .dword 0 ; refill_count
+ ; == 0 <=> last_refill_cause is invalid
+2: .dword 0 ; last_refill_cause
+ .text
+ .globl \handler
+ .type \handler, "function"
+\handler:
+ subq 4, $sp
+; (The pipeline stalls for one cycle; $sp used as address in the next cycle.)
+ move $srs, [$sp]
+ subq 4, $sp
+ move \mmu, $srs ; Select MMU support register bank
+ move.d $acr, [$sp]
+ subq 12, $sp
+ move.d 1b, $acr ; Point to refill_count
+ movem $r2, [$sp]
+
+ test.d [$acr] ; refill_count == 0 ?
+ beq 5f ; yes, last_refill_cause is invalid
+ move.d $acr, $r1
+
+ ; last_refill_cause is valid, investigate cause
+ addq 4, $r1 ; Point to last_refill_cause
+ move $s3, $r0 ; Get rw_mm_cause
+ move.d [$r1], $r2 ; Get last_refill_cause
+ cmp.d $r0, $r2 ; rw_mm_cause == last_refill_cause ?
+ beq 6f ; yes, increment count
+ moveq 1, $r2
+
+ ; rw_mm_cause != last_refill_cause
+ move.d $r2, [$acr] ; refill_count = 1
+ move.d $r0, [$r1] ; last_refill_cause = rw_mm_cause
+
+3: ; Probably not in a loop, continue normal processing
+ move.d current_pgd, $acr ; PGD
+ ; Look up PMD in PGD
+ lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
+ move.d [$acr], $acr ; PGD for the current process
+ addi $r0.d, $acr, $acr
+ move $s3, $r0 ; rw_mm_cause
+ move.d [$acr], $acr ; Get PMD
+ beq 8f
+ ; Look up PTE in PMD
+ lsrq PAGE_SHIFT, $r0
+ and.w PAGE_MASK, $acr ; Remove PMD flags
+ and.d 0x7ff, $r0 ; Get PTE index into PMD (bit 13-23)
+ addi $r0.d, $acr, $acr
+ move.d [$acr], $acr ; Get PTE
+ beq 9f
+ movem [$sp], $r2 ; Restore r0-r2 in delay slot
+ addq 12, $sp
+ ; Store in TLB
+ move $acr, $s5
+4: ; Return
+ move.d [$sp+], $acr
+ move [$sp], $srs
+ addq 4, $sp
+ rete
+ rfe
+
+5: ; last_refill_cause is invalid
+ moveq 1, $r2
+ addq 4, $r1 ; Point to last_refill_cause
+ move.d $r2, [$acr] ; refill_count = 1
+ move $s3, $r0 ; Get rw_mm_cause
+ ba 3b ; Continue normal processing
+ move.d $r0,[$r1] ; last_refill_cause = rw_mm_cause
+
+6: ; rw_mm_cause == last_refill_cause
+ move.d [$acr], $r2 ; Get refill_count
+ cmpq 4, $r2 ; refill_count > 4 ?
+ bhi 7f ; yes
+ addq 1, $r2 ; refill_count++
+ ba 3b ; Continue normal processing
+ move.d $r2, [$acr]
+
+7: ; refill_count > 4, error
+ move.d $acr, $r0 ; Save pointer to refill_count
+ clear.d [$r0] ; refill_count = 0
+
+ ;; rewind the short stack
+ movem [$sp], $r2 ; Restore r0-r2
+ addq 12, $sp
+ move.d [$sp+], $acr
+ move [$sp], $srs
+ addq 4, $sp
+ ;; Keep it simple (slow), save all the regs.
+ SAVE_ALL
+ jsr __flush_tlb_all
+ nop
+ ba ret_from_intr ; Return
+ nop
+
+8: ; PMD missing, let the mm subsystem fix it up.
+ movem [$sp], $r2 ; Restore r0-r2
+9: ; PTE missing, let the mm subsystem fix it up.
+ addq 12, $sp
+ move.d [$sp+], $acr
+ move [$sp], $srs
+ addq 4, $sp
+ SAVE_ALL
+ move \mmu, $srs
+ move.d $sp, $r11 ; regs
+ clear.d $r12 ; Not a protection fault
+ move.w PAGE_MASK, $acr
+ move $s3, $r10 ; rw_mm_cause
+ btstq 9, $r10 ; Check if write access
+ smi $r13
+ and.w PAGE_MASK, $r10 ; Get VPN (virtual address)
+ jsr do_page_fault
+ and.w $acr, $r10
+ ; Return
+ ba ret_from_intr
+ nop
+ .size \handler, . - \handler
+.endm
+
+ ; These are the MMU refill and bus fault handlers.
+
+MMU_REFILL_HANDLER i_mmu_refill, 1
+MMU_BUS_FAULT_HANDLER i_mmu_invalid, 1, 0, 0
+MMU_BUS_FAULT_HANDLER i_mmu_access, 1, 0, 0
+MMU_BUS_FAULT_HANDLER i_mmu_execute, 1, 0, 1
+MMU_REFILL_HANDLER d_mmu_refill, 2
+MMU_BUS_FAULT_HANDLER d_mmu_invalid, 2, 0, 0
+MMU_BUS_FAULT_HANDLER d_mmu_access, 2, 0, 0
+MMU_BUS_FAULT_HANDLER d_mmu_write, 2, 1, 0
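For readers who don't speak CRIS assembly, the fast path of MMU_REFILL_HANDLER (case 1 in the comment above) is a two-level software page table walk driven by the faulting address in rw_mm_cause: bits 24-31 index the PGD, bits 13-23 index the PMD, and the PTE found there is written straight into the TLB via $s5. A rough C rendering of that walk, with an illustrative function name that does not appear in the file above:

    /* Rough C equivalent of the refill fast path: returns the PTE to load
     * into the TLB, or 0 if the PMD (case 3) or PTE (case 2) is missing and
     * do_page_fault() has to be called instead. */
    static inline unsigned long refill_lookup(unsigned long fault_vaddr,
                                              unsigned long *pgd)
    {
            unsigned long pmd = pgd[fault_vaddr >> 24];      /* bits 24-31 */
            unsigned long *pte_table;

            if (pmd == 0)
                    return 0;                                /* case 3: no PMD */

            pte_table = (unsigned long *)(pmd & PAGE_MASK);  /* strip PMD flags */
            /* bits 13-23 select one of the 2048 PTEs in this PMD */
            return pte_table[(fault_vaddr >> PAGE_SHIFT) & 0x7ff];
    }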
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
new file mode 100644
index 000000000..c030d0206
--- /dev/null
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -0,0 +1,207 @@
+/*
+ * Low level TLB handling.
+ *
+ * Copyright (C) 2000-2003, Axis Communications AB.
+ *
+ * Authors: Bjorn Wesen <bjornw@axis.com>
+ * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
+ */
+
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <arch/hwregs/asm/mmu_defs_asm.h>
+#include <arch/hwregs/supp_reg.h>
+
+#define UPDATE_TLB_SEL_IDX(val) \
+do { \
+ unsigned long tlb_sel; \
+ \
+ tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
+ SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel); \
+} while(0)
+
+#define UPDATE_TLB_HILO(tlb_hi, tlb_lo) \
+do { \
+ SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi); \
+ SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo); \
+} while(0)
+
+/*
+ * The TLB can host up to 256 different mm contexts at the same time. The running
+ * context is found in the PID register. Each TLB entry contains a page_id that
+ * has to match the PID register to give a hit. page_id_map keeps track of which
+ * mm is assigned to which page_id, making sure it's known when to
+ * invalidate TLB entries.
+ *
+ * The last page_id is never used by a running context; it serves as an invalid
+ * page_id so that it's possible to make TLB entries that will never match.
+ *
+ * Note: the flushes need to be atomic, otherwise an interrupt handler that uses
+ * vmalloc'ed memory might cause a TLB load in the middle of a flush.
+ */
+
+/* Flush all TLB entries. */
+void
+__flush_tlb_all(void)
+{
+ int i;
+ int mmu;
+ unsigned long flags;
+ unsigned long mmu_tlb_hi;
+ unsigned long mmu_tlb_sel;
+
+ /*
+ * Mask with 0xf so similar TLB entries aren't written in the same 4-way
+ * entry group.
+ */
+ local_irq_save(flags);
+
+ for (mmu = 1; mmu <= 2; mmu++) {
+ SUPP_BANK_SEL(mmu); /* Select the MMU */
+ for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+ /* Store invalid entry */
+ mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);
+
+ mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
+ | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));
+
+ SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
+ SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
+ SUPP_REG_WR(RW_MM_TLB_LO, 0);
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/* Flush an entire user address space. */
+void
+__flush_tlb_mm(struct mm_struct *mm)
+{
+ int i;
+ int mmu;
+ unsigned long flags;
+ unsigned long page_id;
+ unsigned long tlb_hi;
+ unsigned long mmu_tlb_hi;
+
+ page_id = mm->context.page_id;
+
+ if (page_id == NO_CONTEXT)
+ return;
+
+ /* Mark the TLB entries that match the page_id as invalid. */
+ local_irq_save(flags);
+
+ for (mmu = 1; mmu <= 2; mmu++) {
+ SUPP_BANK_SEL(mmu);
+ for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+ UPDATE_TLB_SEL_IDX(i);
+
+ /* Get the page_id */
+ SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
+
+ /* Check if the page_id matches. */
+ if ((tlb_hi & 0xff) == page_id) {
+ mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
+ INVALID_PAGEID)
+ | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
+ i & 0xf));
+
+ UPDATE_TLB_HILO(mmu_tlb_hi, 0);
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/* Invalidate a single page. */
+void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ int i;
+ int mmu;
+ unsigned long page_id;
+ unsigned long flags;
+ unsigned long tlb_hi;
+ unsigned long mmu_tlb_hi;
+
+ page_id = vma->vm_mm->context.page_id;
+
+ if (page_id == NO_CONTEXT)
+ return;
+
+ addr &= PAGE_MASK;
+
+ /*
+ * Invalidate those TLB entries that match both the mm context and the
+ * requested virtual address.
+ */
+ local_irq_save(flags);
+
+ for (mmu = 1; mmu <= 2; mmu++) {
+ SUPP_BANK_SEL(mmu);
+ for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+ UPDATE_TLB_SEL_IDX(i);
+ SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
+
+ /* Check if page_id and address match */
+ if (((tlb_hi & 0xff) == page_id) &&
+ ((tlb_hi & PAGE_MASK) == addr)) {
+ mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
+ INVALID_PAGEID) | addr;
+
+ UPDATE_TLB_HILO(mmu_tlb_hi, 0);
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.page_id = NO_CONTEXT;
+ return 0;
+}
+
+static DEFINE_SPINLOCK(mmu_context_lock);
+
+/* Called in schedule() just before actually doing the switch_to. */
+void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (prev != next) {
+ int cpu = smp_processor_id();
+
+ /* Make sure there is a MMU context. */
+ spin_lock(&mmu_context_lock);
+ get_mmu_context(next);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ spin_unlock(&mmu_context_lock);
+
+ /*
+ * Remember the pgd for the fault handlers. Keep a separate
+ * copy of it because current and active_mm might be invalid
+ * at points where there's still a need to dereference the pgd.
+ */
+ per_cpu(current_pgd, cpu) = next->pgd;
+
+ /* Switch context in the MMU. */
+ if (tsk && task_thread_info(tsk)) {
+ SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
+ task_thread_info(tsk)->tls);
+ } else {
+ SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
+ }
+ }
+}
+
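The invariant the flush routines above rely on is that only the low eight bits of RW_MM_TLB_HI carry the context's page_id (hence the limit of 256 contexts), with the remaining bits holding the VPN. A tiny illustrative predicate, equivalent to the checks inside __flush_tlb_mm() and __flush_tlb_page(); the helper name is made up and not part of the file:

    /* Illustration only: a TLB entry belongs to an mm iff the low byte of its
     * RW_MM_TLB_HI value equals that mm's page_id. */
    static inline int tlb_entry_matches_mm(unsigned long tlb_hi,
                                           const struct mm_struct *mm)
    {
            return (tlb_hi & 0xff) == mm->context.page_id;
    }

__flush_tlb_page() additionally requires (tlb_hi & PAGE_MASK) == addr before rewriting the entry with INVALID_PAGEID.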