Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/c-r4k.c        |  72
-rw-r--r--   arch/mips/mm/dma-default.c  |   7
-rw-r--r--   arch/mips/mm/init.c         |  14
-rw-r--r--   arch/mips/mm/page.c         |   9
-rw-r--r--   arch/mips/mm/sc-mips.c      |   1
-rw-r--r--   arch/mips/mm/tlb-r3k.c      |  24
-rw-r--r--   arch/mips/mm/tlb-r4k.c      |  56
-rw-r--r--   arch/mips/mm/tlb-r8k.c      |   2
-rw-r--r--   arch/mips/mm/tlbex.c        | 260
-rw-r--r--   arch/mips/mm/uasm-mips.c    |   2
-rw-r--r--   arch/mips/mm/uasm.c         |   3
11 files changed, 339 insertions(+), 111 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index caac3d747..ef7f925dd 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -77,6 +77,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
  */
 static unsigned long icache_size __read_mostly;
 static unsigned long dcache_size __read_mostly;
+static unsigned long vcache_size __read_mostly;
 static unsigned long scache_size __read_mostly;
 
 /*
@@ -447,6 +448,11 @@ static inline void local_r4k___flush_cache_all(void * args)
         r4k_blast_scache();
         break;
 
+    case CPU_BMIPS5000:
+        r4k_blast_scache();
+        __sync();
+        break;
+
     default:
         r4k_blast_dcache();
         r4k_blast_icache();
@@ -492,7 +498,14 @@ static inline void local_r4k_flush_cache_range(void * args)
     if (!(has_valid_asid(vma->vm_mm)))
         return;
 
-    r4k_blast_dcache();
+    /*
+     * If dcache can alias, we must blast it since mapping is changing.
+     * If executable, we must ensure any dirty lines are written back far
+     * enough to be visible to icache.
+     */
+    if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+        r4k_blast_dcache();
+    /* If executable, blast stale lines from icache */
     if (exec)
         r4k_blast_icache();
 }
@@ -502,7 +515,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 {
     int exec = vma->vm_flags & VM_EXEC;
 
-    if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+    if (cpu_has_dc_aliases || exec)
         r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
@@ -1148,6 +1161,8 @@ static void probe_pcache(void)
             c->dcache.ways * c->dcache.linesz;
         c->dcache.waybit = 0;
+        if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+            c->options |= MIPS_CPU_PREFETCH;
         break;
 
     case CPU_CAVIUM_OCTEON3:
@@ -1278,6 +1293,8 @@ static void probe_pcache(void)
     case CPU_M5150:
     case CPU_QEMU_GENERIC:
     case CPU_I6400:
+    case CPU_P6600:
+    case CPU_M6250:
         if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
             (c->icache.waysize > PAGE_SIZE))
             c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1304,7 +1321,14 @@ static void probe_pcache(void)
         break;
 
     case CPU_ALCHEMY:
+    case CPU_I6400:
         c->icache.flags |= MIPS_CACHE_IC_F_DC;
+        break;
+
+    case CPU_BMIPS5000:
+        c->icache.flags |= MIPS_CACHE_IC_F_DC;
+        /* Cache aliases are handled in hardware; allow HIGHMEM */
+        c->dcache.flags &= ~MIPS_CACHE_ALIASES;
         break;
 
     case CPU_LOONGSON2:
@@ -1328,6 +1352,31 @@ static void probe_pcache(void)
         c->dcache.linesz);
 }
 
+static void probe_vcache(void)
+{
+    struct cpuinfo_mips *c = &current_cpu_data;
+    unsigned int config2, lsize;
+
+    if (current_cpu_type() != CPU_LOONGSON3)
+        return;
+
+    config2 = read_c0_config2();
+    if ((lsize = ((config2 >> 20) & 15)))
+        c->vcache.linesz = 2 << lsize;
+    else
+        c->vcache.linesz = lsize;
+
+    c->vcache.sets = 64 << ((config2 >> 24) & 15);
+    c->vcache.ways = 1 + ((config2 >> 16) & 15);
+
+    vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
+
+    c->vcache.waybit = 0;
+
+    pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+        vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+}
+
 /*
  * If you even _breathe_ on this function, look at the gcc output and make sure
  * it does not pop things on and off the stack for the cache sizing loop that
@@ -1650,6 +1699,7 @@ void r4k_cache_init(void)
     struct cpuinfo_mips *c = &current_cpu_data;
 
     probe_pcache();
+    probe_vcache();
     setup_scache();
 
     r4k_blast_dcache_page_setup();
@@ -1671,7 +1721,7 @@ void r4k_cache_init(void)
      * This code supports virtually indexed processors and will be
      * unnecessarily inefficient on physically indexed processors.
      */
-    if (c->dcache.linesz)
+    if (c->dcache.linesz && cpu_has_dc_aliases)
         shm_align_mask = max_t( unsigned long,
                     c->dcache.sets * c->dcache.linesz - 1,
                     PAGE_SIZE - 1);
@@ -1744,12 +1794,24 @@ void r4k_cache_init(void)
         flush_icache_range = (void *)b5k_instruction_hazard;
         local_flush_icache_range = (void *)b5k_instruction_hazard;
 
-        /* Cache aliases are handled in hardware; allow HIGHMEM */
-        current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;
-
         /* Optimization: an L2 flush implicitly flushes the L1 */
         current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
         break;
+
+    case CPU_LOONGSON3:
+        /* Loongson-3 maintains cache coherency by hardware */
+        __flush_cache_all       = cache_noop;
+        __flush_cache_vmap      = cache_noop;
+        __flush_cache_vunmap    = cache_noop;
+        __flush_kernel_vmap_range = (void *)cache_noop;
+        flush_cache_mm          = (void *)cache_noop;
+        flush_cache_page        = (void *)cache_noop;
+        flush_cache_range       = (void *)cache_noop;
+        flush_cache_sigtramp    = (void *)cache_noop;
+        flush_icache_all        = (void *)cache_noop;
+        flush_data_cache_page   = (void *)cache_noop;
+        local_flush_data_cache_page = (void *)cache_noop;
+        break;
     }
 }
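The probe_vcache() routine added above derives the Loongson-3 victim cache geometry from the Config2 register: bits [16:19] hold ways-1, bits [20:23] the encoded line size (2 << n), and bits [24:27] the encoded sets per way (64 << n). The following stand-alone sketch replays that decoding on a hypothetical Config2 value; the field layout is taken from the patch, the value itself is made up:

    #include <stdio.h>

    /* Decode Loongson-3 victim-cache geometry the way probe_vcache() does.
     * The Config2 value below is hypothetical, chosen to describe 16 ways,
     * 64-byte lines, 4096 sets (4 MiB total).
     */
    int main(void)
    {
        unsigned int config2 = (6u << 24) | (5u << 20) | (15u << 16);
        unsigned int lsize = (config2 >> 20) & 15;
        unsigned int linesz = lsize ? 2u << lsize : 0;
        unsigned int sets = 64u << ((config2 >> 24) & 15);
        unsigned int ways = 1 + ((config2 >> 16) & 15);

        printf("vcache: %u ways, %u sets, %u-byte lines, %u kB total\n",
               ways, sets, linesz, (sets * ways * linesz) >> 10);
        return 0;
    }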
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 730d394ce..cb557d28c 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -88,19 +88,20 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
     else
 #endif
 #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
-    if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+    if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
         dma_flag = __GFP_DMA;
     else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
         dma_flag = __GFP_DMA32;
     else
 #endif
 #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
-    if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+    if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
         dma_flag = __GFP_DMA32;
     else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-    if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+    if (dev == NULL ||
+        dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
         dma_flag = __GFP_DMA;
     else
 #endif
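The massage_gfp_flags() change above makes each zone test tolerate dev == NULL: with no device there is no coherent_dma_mask to consult, so allocation falls back to the most restrictive zone. A minimal sketch of that fallback logic, using illustrative constants rather than the kernel's gfp machinery:

    #include <stdio.h>

    /* Fallback pattern from massage_gfp_flags(): a NULL device means
     * "assume the narrowest DMA mask". Zone names are illustrative.
     */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL << (n)) - 1)

    struct device { unsigned long long coherent_dma_mask; };

    static const char *pick_zone(const struct device *dev)
    {
        if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
            return "ZONE_DMA";
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
            return "ZONE_DMA32";
        return "ZONE_NORMAL";
    }

    int main(void)
    {
        struct device d = { .coherent_dma_mask = DMA_BIT_MASK(32) };

        printf("%s\n", pick_zone(NULL)); /* ZONE_DMA   */
        printf("%s\n", pick_zone(&d));   /* ZONE_DMA32 */
        return 0;
    }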
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 7e5fa0938..9b58eb5fd 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -98,8 +98,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
     idx += in_interrupt() ? FIX_N_COLOURS : 0;
     vaddr = __fix_to_virt(FIX_CMAP_END - idx);
     pte = mk_pte(page, prot);
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
     entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+    entrylo = pte.pte_high;
 #else
     entrylo = pte_to_entrylo(pte_val(pte));
 #endif
@@ -110,9 +112,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
     write_c0_entrylo0(entrylo);
     write_c0_entrylo1(entrylo);
 #ifdef CONFIG_XPA
-    entrylo = (pte.pte_low & _PFNX_MASK);
-    writex_c0_entrylo0(entrylo);
-    writex_c0_entrylo1(entrylo);
+    if (cpu_has_xpa) {
+        entrylo = (pte.pte_low & _PFNX_MASK);
+        writex_c0_entrylo0(entrylo);
+        writex_c0_entrylo1(entrylo);
+    }
 #endif
     tlbidx = read_c0_wired();
     write_c0_wired(tlbidx + 1);
@@ -196,7 +200,7 @@ void copy_to_user_page(struct vm_area_struct *vma,
         if (cpu_has_dc_aliases)
             SetPageDcacheDirty(page);
     }
-    if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+    if (vma->vm_flags & VM_EXEC)
         flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
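The XPA-related hunks here (and in tlb-r4k.c below) exist because, with XPA, a 32-bit kernel writes a wider-than-32-bit EntryLo in two steps: mtc0 for the low word and mthc0 for the high word, the latter now guarded by cpu_has_xpa. A rough user-space sketch of the split, with a hypothetical EntryLo value; the 24-bit mask mirrors the uasm_i_ext(..., 0, 24) in the tlbex.c hunk further down:

    #include <stdio.h>

    /* Illustrative only: split a 64-bit EntryLo value into the two
     * 32-bit halves that mtc0/mthc0 would receive on an XPA-capable
     * MIPS32 core. The value is made up.
     */
    int main(void)
    {
        unsigned long long entrylo = (5ULL << 32) | 0x12345697ULL;
        unsigned int lo = (unsigned int)entrylo;        /* mtc0 word  */
        unsigned int hi = (entrylo >> 32) & 0x00ffffff; /* mthc0 word */

        printf("mtc0  -> 0x%08x\nmthc0 -> 0x%08x\n", lo, hi);
        return 0;
    }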
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 885d73ffd..c41953ca6 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -188,6 +188,15 @@ static void set_prefetch_parameters(void)
         }
         break;
 
+    case CPU_LOONGSON3:
+        /* Loongson-3 only supports Pref_Load/Pref_Store. */
+        pref_bias_clear_store = 128;
+        pref_bias_copy_load = 128;
+        pref_bias_copy_store = 128;
+        pref_src_mode = Pref_Load;
+        pref_dst_mode = Pref_Store;
+        break;
+
     default:
         pref_bias_clear_store = 128;
         pref_bias_copy_load = 256;
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 91dec32c7..286a4d5a1 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -141,6 +141,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
     case CPU_P5600:
     case CPU_BMIPS5000:
     case CPU_QEMU_GENERIC:
+    case CPU_P6600:
         if (config2 & (1 << 12))
             return 0;
     }
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index b4f366f7c..1290b9956 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -43,7 +43,7 @@ static void local_flush_tlb_from(int entry)
 {
     unsigned long old_ctx;
 
-    old_ctx = read_c0_entryhi() & ASID_MASK;
+    old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
     write_c0_entrylo0(0);
     while (entry < current_cpu_data.tlbsize) {
         write_c0_index(entry << 8);
@@ -81,6 +81,7 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long end)
 {
+    unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
     struct mm_struct *mm = vma->vm_mm;
     int cpu = smp_processor_id();
 
@@ -89,13 +90,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef DEBUG_TLB
         printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-            cpu_context(cpu, mm) & ASID_MASK, start, end);
+            cpu_context(cpu, mm) & asid_mask, start, end);
 #endif
         local_irq_save(flags);
         size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
         if (size <= current_cpu_data.tlbsize) {
-            int oldpid = read_c0_entryhi() & ASID_MASK;
-            int newpid = cpu_context(cpu, mm) & ASID_MASK;
+            int oldpid = read_c0_entryhi() & asid_mask;
+            int newpid = cpu_context(cpu, mm) & asid_mask;
 
             start &= PAGE_MASK;
             end += PAGE_SIZE - 1;
@@ -159,6 +160,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+    unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
     int cpu = smp_processor_id();
 
     if (cpu_context(cpu, vma->vm_mm) != 0) {
@@ -168,10 +170,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
         printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-        newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+        newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
         page &= PAGE_MASK;
         local_irq_save(flags);
-        oldpid = read_c0_entryhi() & ASID_MASK;
+        oldpid = read_c0_entryhi() & asid_mask;
         write_c0_entryhi(page | newpid);
         BARRIER;
         tlb_probe();
@@ -190,6 +192,7 @@ finish:
 
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
+    unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
     unsigned long flags;
     int idx, pid;
 
@@ -199,10 +202,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
     if (current->active_mm != vma->vm_mm)
         return;
 
-    pid = read_c0_entryhi() & ASID_MASK;
+    pid = read_c0_entryhi() & asid_mask;
 
 #ifdef DEBUG_TLB
-    if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+    if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
         printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
                (cpu_context(cpu, vma->vm_mm)), pid);
     }
@@ -228,6 +231,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
              unsigned long entryhi, unsigned long pagemask)
 {
+    unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
     unsigned long flags;
     unsigned long old_ctx;
     static unsigned long wired = 0;
@@ -243,7 +247,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
         local_irq_save(flags);
         /* Save old context and create impossible VPN2 value */
-        old_ctx = read_c0_entryhi() & ASID_MASK;
+        old_ctx = read_c0_entryhi() & asid_mask;
         old_pagemask = read_c0_pagemask();
         w = read_c0_wired();
         write_c0_wired(w + 1);
@@ -266,7 +270,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
         local_irq_save(flags);
-        old_ctx = read_c0_entryhi() & ASID_MASK;
+        old_ctx = read_c0_entryhi() & asid_mask;
         write_c0_entrylo0(entrylo0);
         write_c0_entryhi(entryhi);
         write_c0_index(wired);
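All of the tlb-r3k.c changes are one mechanical substitution: the build-time ASID_MASK constant becomes cpu_asid_mask(&current_cpu_data), read once per function into a local so the mask can vary by CPU at run time. A toy model of the pattern; the cpuinfo struct and mask value here are stand-ins, not the kernel's:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's cpuinfo/ASID plumbing.
     * The point is that the mask is a per-CPU datum fetched once per
     * function, rather than a compile-time constant.
     */
    struct cpuinfo { unsigned long asid_mask; };

    static unsigned long cpu_asid_mask(const struct cpuinfo *c)
    {
        return c->asid_mask;
    }

    int main(void)
    {
        struct cpuinfo r3000 = { .asid_mask = 0xfc0 }; /* 6-bit ASID */
        unsigned long entryhi = 0x00401fc5;            /* hypothetical */
        unsigned long asid_mask = cpu_asid_mask(&r3000);

        printf("ASID = 0x%lx\n", entryhi & asid_mask);
        return 0;
    }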
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c17d7627f..e8b335c16 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -28,25 +28,28 @@ extern void build_tlb_refill_handler(void);
 
 /*
- * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
- * unfortunately, itlb is not totally transparent to software.
+ * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
+ * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
+ * itlb/dtlb are not totally transparent to software.
  */
-static inline void flush_itlb(void)
+static inline void flush_micro_tlb(void)
 {
     switch (current_cpu_type()) {
     case CPU_LOONGSON2:
+        write_c0_diag(LOONGSON_DIAG_ITLB);
+        break;
     case CPU_LOONGSON3:
-        write_c0_diag(4);
+        write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
         break;
     default:
         break;
     }
 }
 
-static inline void flush_itlb_vm(struct vm_area_struct *vma)
+static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
 {
     if (vma->vm_flags & VM_EXEC)
-        flush_itlb();
+        flush_micro_tlb();
 }
 
 void local_flush_tlb_all(void)
@@ -93,7 +96,7 @@ void local_flush_tlb_all(void)
     tlbw_use_hazard();
     write_c0_entryhi(old_ctx);
     htw_start();
-    flush_itlb();
+    flush_micro_tlb();
     local_irq_restore(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -159,7 +162,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
         } else {
             drop_mmu_context(mm, cpu);
         }
-        flush_itlb();
+        flush_micro_tlb();
         local_irq_restore(flags);
     }
 }
@@ -205,7 +208,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
     } else {
         local_flush_tlb_all();
     }
-    flush_itlb();
+    flush_micro_tlb();
     local_irq_restore(flags);
 }
 
@@ -240,7 +243,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
     finish:
         write_c0_entryhi(oldpid);
         htw_start();
-        flush_itlb_vm(vma);
+        flush_micro_tlb_vm(vma);
         local_irq_restore(flags);
     }
 }
@@ -274,7 +277,7 @@ void local_flush_tlb_one(unsigned long page)
     }
     write_c0_entryhi(oldpid);
     htw_start();
-    flush_itlb();
+    flush_micro_tlb();
     local_irq_restore(flags);
 }
 
@@ -301,7 +304,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
     local_irq_save(flags);
     htw_stop();
-    pid = read_c0_entryhi() & ASID_MASK;
+    pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
     address &= (PAGE_MASK << 1);
     write_c0_entryhi(address | pid);
     pgdp = pgd_offset(vma->vm_mm, address);
@@ -336,10 +339,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
         write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-        writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+        if (cpu_has_xpa)
+            writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
         ptep++;
         write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-        writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+        if (cpu_has_xpa)
+            writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
         write_c0_entrylo0(ptep->pte_high);
         ptep++;
@@ -357,7 +362,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
     }
     tlbw_use_hazard();
     htw_start();
-    flush_itlb_vm(vma);
+    flush_micro_tlb_vm(vma);
     local_irq_restore(flags);
 }
 
@@ -400,19 +405,20 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-int __init has_transparent_hugepage(void)
+int has_transparent_hugepage(void)
 {
-    unsigned int mask;
-    unsigned long flags;
+    static unsigned int mask = -1;
 
-    local_irq_save(flags);
-    write_c0_pagemask(PM_HUGE_MASK);
-    back_to_back_c0_hazard();
-    mask = read_c0_pagemask();
-    write_c0_pagemask(PM_DEFAULT_MASK);
-
-    local_irq_restore(flags);
+    if (mask == -1) {   /* first call comes during __init */
+        unsigned long flags;
+        local_irq_save(flags);
+        write_c0_pagemask(PM_HUGE_MASK);
+        back_to_back_c0_hazard();
+        mask = read_c0_pagemask();
+        write_c0_pagemask(PM_DEFAULT_MASK);
+        local_irq_restore(flags);
+    }
 
     return mask == PM_HUGE_MASK;
 }
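The has_transparent_hugepage() rewrite above is a memoization pattern: the PageMask probe still runs exactly once, on the first (early, __init-time) call, and the result is remembered in a static for later callers. The same idea in isolation, with probe_hw() as a hypothetical stand-in for the PageMask write/read-back:

    #include <stdio.h>

    /* An expensive hardware probe runs on the first call only;
     * subsequent calls return the cached answer.
     */
    static unsigned int probe_hw(void)
    {
        puts("probing hardware...");
        return 42;
    }

    static unsigned int cached_probe(void)
    {
        static unsigned int value = -1;

        if (value == (unsigned int)-1)  /* first call */
            value = probe_hw();
        return value;
    }

    int main(void)
    {
        printf("%u\n", cached_probe()); /* probes   */
        printf("%u\n", cached_probe()); /* cached   */
        return 0;
    }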
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 138a2ec7c..e86e2e55a 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -194,7 +194,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
     if (current->active_mm != vma->vm_mm)
         return;
 
-    pid = read_c0_entryhi() & ASID_MASK;
+    pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 
     local_irq_save(flags);
     address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 84c6e3fda..4004b659c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -234,20 +234,16 @@ static void output_pgtable_bits_defines(void)
     pr_debug("\n");
 
     pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
-    pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
+    pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
     pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
     pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
     pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
     pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 #endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-    if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
+    if (cpu_has_rixi)
         pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
-        pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
-#endif
-    }
 #endif
     pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
     pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
@@ -284,7 +280,12 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int count)
 #define C0_ENTRYLO1 3, 0
 #define C0_CONTEXT  4, 0
 #define C0_PAGEMASK 5, 0
+#define C0_PWBASE   5, 5
+#define C0_PWFIELD  5, 6
+#define C0_PWSIZE   5, 7
+#define C0_PWCTL    6, 6
 #define C0_BADVADDR 8, 0
+#define C0_PGD      9, 7
 #define C0_ENTRYHI  10, 0
 #define C0_EPC      14, 0
 #define C0_XCONTEXT 20, 0
@@ -630,6 +631,11 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                             unsigned int reg)
 {
+    if (_PAGE_GLOBAL_SHIFT == 0) {
+        /* pte_t is already in EntryLo format */
+        return;
+    }
+
     if (cpu_has_rixi && _PAGE_NO_EXEC) {
         if (fill_includes_sw_bits) {
             UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -808,7 +814,10 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
     if (pgd_reg != -1) {
         /* pgd is in pgd_reg */
-        UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
+        if (cpu_has_ldpte)
+            UASM_i_MFC0(p, ptr, C0_PWBASE);
+        else
+            UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
     } else {
 #if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
         /*
@@ -1007,39 +1016,40 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 
 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
-    /*
-     * 64bit address support (36bit on a 32bit CPU) in a 32bit
-     * Kernel is a special case. Only a few CPUs use it.
-     */
-    if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
-        int pte_off_even = sizeof(pte_t) / 2;
-        int pte_off_odd = pte_off_even + sizeof(pte_t);
-#ifdef CONFIG_XPA
-        const int scratch = 1; /* Our extra working register */
+    int pte_off_even = 0;
+    int pte_off_odd = sizeof(pte_t);
 
-        uasm_i_addu(p, scratch, 0, ptep);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+    /* The low 32 bits of EntryLo is stored in pte_high */
+    pte_off_even += offsetof(pte_t, pte_high);
+    pte_off_odd += offsetof(pte_t, pte_high);
 #endif
+
+    if (config_enabled(CONFIG_XPA)) {
         uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-        uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
         UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-        UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
         UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-        UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-#ifdef CONFIG_XPA
-        uasm_i_lw(p, tmp, 0, scratch);
-        uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
-        uasm_i_lui(p, scratch, 0xff);
-        uasm_i_ori(p, scratch, scratch, 0xffff);
-        uasm_i_and(p, tmp, scratch, tmp);
-        uasm_i_and(p, ptep, scratch, ptep);
-        uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
-        uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
-#endif
+
+        if (cpu_has_xpa && !mips_xpa_disabled) {
+            uasm_i_lw(p, tmp, 0, ptep);
+            uasm_i_ext(p, tmp, tmp, 0, 24);
+            uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+        }
+
+        uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
+        UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+        UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
+
+        if (cpu_has_xpa && !mips_xpa_disabled) {
+            uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+            uasm_i_ext(p, tmp, tmp, 0, 24);
+            uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+        }
         return;
     }
 
-    UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
-    UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+    UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+    UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
     if (r45k_bvahwbug())
         build_tlb_probe_entry(p);
     build_convert_pte_to_entrylo(p, tmp);
@@ -1421,6 +1431,108 @@ static void build_r4000_tlb_refill_handler(void)
     dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
 }
 
+static void setup_pw(void)
+{
+    unsigned long pgd_i, pgd_w;
+#ifndef __PAGETABLE_PMD_FOLDED
+    unsigned long pmd_i, pmd_w;
+#endif
+    unsigned long pt_i, pt_w;
+    unsigned long pte_i, pte_w;
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+    unsigned long psn;
+
+    psn = ilog2(_PAGE_HUGE);    /* bit used to indicate huge page */
+#endif
+    pgd_i = PGDIR_SHIFT;  /* 1st level PGD */
+#ifndef __PAGETABLE_PMD_FOLDED
+    pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+
+    pmd_i = PMD_SHIFT;    /* 2nd level PMD */
+    pmd_w = PMD_SHIFT - PAGE_SHIFT;
+#else
+    pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+#endif
+
+    pt_i = PAGE_SHIFT;    /* 3rd level PTE */
+    pt_w = PAGE_SHIFT - 3;
+
+    pte_i = ilog2(_PAGE_GLOBAL);
+    pte_w = 0;
+
+#ifndef __PAGETABLE_PMD_FOLDED
+    write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
+    write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
+#else
+    write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
+    write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
+#endif
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+    write_c0_pwctl(1 << 6 | psn);
+#endif
+    write_c0_kpgd(swapper_pg_dir);
+    kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
+}
+
+static void build_loongson3_tlb_refill_handler(void)
+{
+    u32 *p = tlb_handler;
+    struct uasm_label *l = labels;
+    struct uasm_reloc *r = relocs;
+
+    memset(labels, 0, sizeof(labels));
+    memset(relocs, 0, sizeof(relocs));
+    memset(tlb_handler, 0, sizeof(tlb_handler));
+
+    if (check_for_high_segbits) {
+        uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+        uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+        uasm_il_beqz(&p, &r, K1, label_vmalloc);
+        uasm_i_nop(&p);
+
+        uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+        uasm_i_nop(&p);
+        uasm_l_vmalloc(&l, p);
+    }
+
+    uasm_i_dmfc0(&p, K1, C0_PGD);
+
+    uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+    uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
+#endif
+    uasm_i_ldpte(&p, K1, 0);      /* even */
+    uasm_i_ldpte(&p, K1, 1);      /* odd */
+    uasm_i_tlbwr(&p);
+
+    /* restore page mask */
+    if (PM_DEFAULT_MASK >> 16) {
+        uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
+        uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
+        uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+    } else if (PM_DEFAULT_MASK) {
+        uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
+        uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+    } else {
+        uasm_i_mtc0(&p, 0, C0_PAGEMASK);
+    }
+
+    uasm_i_eret(&p);
+
+    if (check_for_high_segbits) {
+        uasm_l_large_segbits_fault(&l, p);
+        UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
+        uasm_i_jr(&p, K1);
+        uasm_i_nop(&p);
+    }
+
+    uasm_resolve_relocs(relocs, labels);
+    memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
+    local_flush_icache_range(ebase + 0x80, ebase + 0x100);
+    dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
+}
+
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
@@ -1468,7 +1580,10 @@ static void build_setup_pgd(void)
     } else {
         /* PGD in c0_KScratch */
         uasm_i_jr(&p, 31);
-        UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+        if (cpu_has_ldpte)
+            UASM_i_MTC0(&p, a0, C0_PWBASE);
+        else
+            UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
     }
 #else
 #ifdef CONFIG_SMP
@@ -1523,19 +1638,19 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 
 static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
-    unsigned int mode)
+    unsigned int mode, unsigned int scratch)
 {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
     unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+    unsigned int swmode = mode & ~hwmode;
 
-    if (!cpu_has_64bits) {
-        const int scratch = 1; /* Our extra working register */
-
-        uasm_i_lui(p, scratch, (mode >> 16));
+    if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
+        uasm_i_lui(p, scratch, swmode >> 16);
         uasm_i_or(p, pte, pte, scratch);
-    } else
-#endif
-    uasm_i_ori(p, pte, pte, mode);
+        BUG_ON(swmode & 0xffff);
+    } else {
+        uasm_i_ori(p, pte, pte, mode);
+    }
 
 #ifdef CONFIG_SMP
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
     if (cpu_has_64bits)
@@ -1554,6 +1669,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
         /* no uasm_i_nop needed */
         uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
         uasm_i_ori(p, pte, pte, hwmode);
+        BUG_ON(hwmode & ~0xffff);
         uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
         uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
         /* no uasm_i_nop needed */
@@ -1575,6 +1691,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
     if (!cpu_has_64bits) {
         uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
         uasm_i_ori(p, pte, pte, hwmode);
+        BUG_ON(hwmode & ~0xffff);
        uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
         uasm_i_lw(p, pte, 0, ptr);
     }
@@ -1615,9 +1732,8 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
             cur = t;
         }
         uasm_i_andi(p, t, cur,
-            (_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
-        uasm_i_xori(p, t, t,
-            (_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
+            (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
+        uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
         uasm_il_bnez(p, r, t, lid);
         if (pte == t)
             /* You lose the SMP race :-(*/
@@ -1628,11 +1744,11 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 
 /* Make PTE valid, store result in PTR. */
 static void build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
-         unsigned int ptr)
+         unsigned int ptr, unsigned int scratch)
 {
     unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
 
-    iPTE_SW(p, r, pte, ptr, mode);
+    iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1668,12 +1784,12 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
  */
 static void build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
-         unsigned int ptr)
+         unsigned int ptr, unsigned int scratch)
 {
     unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
                  | _PAGE_DIRTY);
 
-    iPTE_SW(p, r, pte, ptr, mode);
+    iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1778,7 +1894,7 @@ static void build_r3000_tlb_load_handler(void)
     build_r3000_tlbchange_handler_head(&p, K0, K1);
     build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
     uasm_i_nop(&p); /* load delay */
-    build_make_valid(&p, &r, K0, K1);
+    build_make_valid(&p, &r, K0, K1, -1);
     build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
     uasm_l_nopage_tlbl(&l, p);
@@ -1809,7 +1925,7 @@ static void build_r3000_tlb_store_handler(void)
     build_r3000_tlbchange_handler_head(&p, K0, K1);
     build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
     uasm_i_nop(&p); /* load delay */
-    build_make_write(&p, &r, K0, K1);
+    build_make_write(&p, &r, K0, K1, -1);
     build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
     uasm_l_nopage_tlbs(&l, p);
@@ -1840,7 +1956,7 @@ static void build_r3000_tlb_modify_handler(void)
     build_r3000_tlbchange_handler_head(&p, K0, K1);
     build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
     uasm_i_nop(&p); /* load delay */
-    build_make_write(&p, &r, K0, K1);
+    build_make_write(&p, &r, K0, K1, -1);
     build_r3000_pte_reload_tlbwi(&p, K0, K1);
 
     uasm_l_nopage_tlbm(&l, p);
@@ -2008,7 +2124,7 @@ static void build_r4000_tlb_load_handler(void)
         }
         uasm_l_tlbl_goaround1(&l, p);
     }
-    build_make_valid(&p, &r, wr.r1, wr.r2);
+    build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
     build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2122,7 +2238,7 @@ static void build_r4000_tlb_store_handler(void)
     build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
     if (m4kc_tlbp_war())
         build_tlb_probe_entry(&p);
-    build_make_write(&p, &r, wr.r1, wr.r2);
+    build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
     build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2178,7 +2294,7 @@ static void build_r4000_tlb_modify_handler(void)
     if (m4kc_tlbp_war())
         build_tlb_probe_entry(&p);
     /* Present and writable bits set, set accessed and dirty bits. */
-    build_make_write(&p, &r, wr.r1, wr.r2);
+    build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
     build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2245,8 +2361,9 @@ static void print_htw_config(void)
             (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
 
     config = read_c0_pwsize();
-    pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
+    pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
         field, config,
+        (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
         (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
         (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
         (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
@@ -2254,9 +2371,12 @@ static void print_htw_config(void)
         (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
 
     pwctl = read_c0_pwctl();
-    pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
+    pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
         pwctl,
         (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+        (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
+        (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
+        (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
         (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
         (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
         (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
@@ -2311,17 +2431,25 @@ static void config_htw_params(void)
     if (CONFIG_PGTABLE_LEVELS >= 3)
         pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
 
-    /* If XPA has been enabled, PTEs are 64-bit in size. */
-    if (config_enabled(CONFIG_64BITS) || (read_c0_pagegrain() & PG_ELPA))
-        pwsize |= 1;
+    /* Set pointer size to size of directory pointers */
+    if (config_enabled(CONFIG_64BIT))
+        pwsize |= MIPS_PWSIZE_PS_MASK;
+    /* PTEs may be multiple pointers long (e.g. with XPA) */
+    pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
+            & MIPS_PWSIZE_PTEW_MASK;
 
     write_c0_pwsize(pwsize);
 
     /* Make sure everything is set before we enable the HTW */
     back_to_back_c0_hazard();
 
-    /* Enable HTW and disable the rest of the pwctl fields */
+    /*
+     * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
+     * the pwctl fields.
+     */
     config = 1 << MIPS_PWCTL_PWEN_SHIFT;
+    if (config_enabled(CONFIG_64BIT))
+        config |= MIPS_PWCTL_XU_MASK;
     write_c0_pwctl(config);
     pr_info("Hardware Page Table Walker enabled\n");
@@ -2394,6 +2522,9 @@ void build_tlb_refill_handler(void)
      */
     static int run_once = 0;
 
+    if (config_enabled(CONFIG_XPA) && !cpu_has_rixi)
+        panic("Kernels supporting XPA currently require CPUs with RIXI");
+
     output_pgtable_bits_defines();
     check_pabits();
 
@@ -2437,13 +2568,18 @@ void build_tlb_refill_handler(void)
         break;
 
     default:
+        if (cpu_has_ldpte)
+            setup_pw();
+
         if (!run_once) {
             scratch_reg = allocate_kscratch();
             build_setup_pgd();
             build_r4000_tlb_load_handler();
             build_r4000_tlb_store_handler();
             build_r4000_tlb_modify_handler();
-            if (!cpu_has_local_ebase)
+            if (cpu_has_ldpte)
+                build_loongson3_tlb_refill_handler();
+            else if (!cpu_has_local_ebase)
                 build_r4000_tlb_refill_handler();
             flush_tlb_handlers();
             run_once++;
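setup_pw() above programs the hardware page-table walker by packing one (shift, width) pair per page-table level into PWField/PWSize at 6-bit granularity; the level offsets 24/12/6/0 are visible in the write_c0_pwfield() call. A sketch of the PWField packing with hypothetical level shifts (the offsets come from the patch, the shifts do not):

    #include <stdio.h>

    /* Pack PWField the way setup_pw() does. The level shifts below are
     * made up for a three-level layout with 16 KB pages; only the field
     * positions (24/12/6/0) are taken from the patch.
     */
    int main(void)
    {
        unsigned long pgd_i = 36, pmd_i = 25, pt_i = 14; /* hypothetical */
        unsigned long pte_i = 6;  /* e.g. ilog2(_PAGE_GLOBAL) */
        unsigned long pwfield = pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i;

        printf("PWField = 0x%08lx\n", pwfield);
        return 0;
    }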
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index b4a837893..9c2220a45 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -153,6 +153,8 @@ static struct insn insn_table[] = {
     { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
     { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
     { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
+    { insn_ldpte, M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD },
+    { insn_lddir, M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD },
     { insn_invalid, 0, 0 }
 };
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 319051c34..ad718debc 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -60,6 +60,7 @@ enum opcode {
     insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
     insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
     insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
+    insn_lddir, insn_ldpte,
 };
 
 struct insn {
@@ -335,6 +336,8 @@ I_u1u2s3(_bbit0);
 I_u1u2s3(_bbit1);
 I_u3u1u2(_lwx)
 I_u3u1u2(_ldx)
+I_u1u2(_ldpte)
+I_u2u1u3(_lddir)
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
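Adding lddir/ldpte to uasm follows its usual recipe: a new enum opcode value, a matching encoding row in insn_table, and an I_...() builder macro selecting which register fields the instruction takes. The toy encoder below shows the table-driven mechanism only; the match words are invented, not the real lwc2-major encodings used above:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy version of uasm's table-driven encoder: each row says how to
     * place operands into the 32-bit instruction word. The match values
     * here are hypothetical.
     */
    enum opcode { insn_lddir, insn_ldpte };
    enum fields { RS = 1, RT = 2, RD = 4 };

    struct insn { enum opcode op; uint32_t match; int fields; };

    static const struct insn table[] = {
        { insn_ldpte, 0x7c000064u, RS | RD },      /* hypothetical */
        { insn_lddir, 0x7c000024u, RS | RT | RD }, /* hypothetical */
    };

    static uint32_t build(const struct insn *i, int rs, int rt, int rd)
    {
        uint32_t word = i->match;

        if (i->fields & RS) word |= (uint32_t)rs << 21;
        if (i->fields & RT) word |= (uint32_t)rt << 16;
        if (i->fields & RD) word |= (uint32_t)rd << 11;
        return word;
    }

    int main(void)
    {
        /* e.g. "lddir k1, k0, 3" with k1 = $27, k0 = $26 */
        printf("0x%08x\n", build(&table[1], 27, 26, 3));
        return 0;
    }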