From b907a8622e39eecfc4b243f3be3ad26559d1faee Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Mon, 27 Jun 2016 21:01:09 -0300
Subject: Linux-libre 4.6.3-gnu

---
 arch/sparc/mm/hugetlbpage.c | 33 ++++++++++++++++++++++++++++-----
 arch/sparc/mm/init_64.c     | 22 +++++++---------------
 arch/sparc/mm/tlb.c         | 25 ++++++++++++++++++-------
 arch/sparc/mm/tsb.c         | 32 +++++++++++++++++---------------
 4 files changed, 70 insertions(+), 42 deletions(-)

diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 4977800e9..ba52e6466 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
+	pte_t orig[2];
+	unsigned long nptes;
 
 	if (!pte_present(*ptep) && pte_present(entry))
 		mm->context.huge_pte_count++;
 
 	addr &= HPAGE_MASK;
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte_at(mm, addr, ptep, entry);
+
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	orig[0] = *ptep;
+	orig[1] = *(ptep + nptes / 2);
+	for (i = 0; i < nptes; i++) {
+		*ptep = entry;
 		ptep++;
 		addr += PAGE_SIZE;
 		pte_val(entry) += PAGE_SIZE;
 	}
+
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 {
 	pte_t entry;
 	int i;
+	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
 		mm->context.huge_pte_count--;
 
 	addr &= HPAGE_MASK;
-
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		pte_clear(mm, addr, ptep);
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	for (i = 0; i < nptes; i++) {
+		*ptep = __pte(0UL);
 		addr += PAGE_SIZE;
 		ptep++;
 	}
 
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+
 	return entry;
 }
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 09e838801..14bb0d5ed 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline bool is_hugetlb_pte(pte_t pte)
-{
-	if ((tlb_type == hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-	    (tlb_type != hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
-		return true;
-	return false;
-}
-#endif
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
@@ -2836,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs)
 	 * the Data-TLB for huge pages.
 	 */
 	if (tlb_type == cheetah_plus) {
+		bool need_context_reload = false;
 		unsigned long ctx;
 
-		spin_lock(&ctx_alloc_lock);
+		spin_lock_irq(&ctx_alloc_lock);
 		ctx = mm->context.sparc64_ctx_val;
 		ctx &= ~CTX_PGSZ_MASK;
 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2857,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs)
 		 * also executing in this address space.
 		 */
 		mm->context.sparc64_ctx_val = ctx;
-		on_each_cpu(context_reload, mm, 0);
+		need_context_reload = true;
 	}
-	spin_unlock(&ctx_alloc_lock);
+	spin_unlock_irq(&ctx_alloc_lock);
+
+	if (need_context_reload)
+		on_each_cpu(context_reload, mm, 0);
 	}
 }
 #endif
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 9df2190c0..f81cd9736 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, bool huge)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, huge);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->huge = huge;
+	}
+
+	if (tb->huge != huge) {
+		flush_tlb_pending();
+		tb->huge = huge;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;
@@ -104,6 +112,8 @@ out:
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm)
 {
+	bool huge = is_hugetlb_pte(orig);
+
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec);
+			tlb_batch_add_one(mm, vaddr, exec, false);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  true);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a06576683..a0604a493 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
-
+	if (!tb->huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
 {
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
-
+	if (!huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-- 
cgit v1.2.3-54-g00ecf
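
Note on the hugetlbpage.c hunks: the old loops called set_pte_at()/pte_clear() once per base-size PTE, which could queue a TLB flush for every one of the PTEs backing an 8MB huge page. The new code stores the PTEs directly and then queues exactly two flushes, one per REAL_HPAGE_SIZE half, since sparc64 maps an 8MB HPAGE with two 4MB hardware TLB entries. A minimal user-space sketch of that shape; the constants mirror sparc64 and flush_half() is a stub standing in for maybe_tlb_batch_add():

#include <stdio.h>

/* Stand-in constants: an 8MB HPAGE is backed by two 4MB "real" huge
 * TLB entries, so HPAGE_SIZE == 2 * REAL_HPAGE_SIZE, and one huge
 * page covers NPTES base-size (8KB) PTEs. */
#define PAGE_SIZE	(8 * 1024UL)
#define REAL_HPAGE_SIZE	(4 * 1024 * 1024UL)
#define HPAGE_SIZE	(2 * REAL_HPAGE_SIZE)
#define NPTES		(HPAGE_SIZE / PAGE_SIZE)

static unsigned long ptes[NPTES];	/* fake page-table slice */

/* stub standing in for maybe_tlb_batch_add() */
static void flush_half(unsigned long addr, unsigned long orig)
{
	printf("queue flush at %#lx (old pte %#lx)\n", addr, orig);
}

static void set_huge_pte(unsigned long addr, unsigned long entry)
{
	/* capture the old PTE of each 4MB half before overwriting */
	unsigned long orig0 = ptes[0], orig1 = ptes[NPTES / 2];
	unsigned long i;

	/* plain stores: no per-PTE flush, unlike set_pte_at() */
	for (i = 0; i < NPTES; i++) {
		ptes[i] = entry;
		entry += PAGE_SIZE;
	}

	/* exactly two flushes, second half first, as in the patch */
	flush_half(addr + REAL_HPAGE_SIZE, orig1);
	flush_half(addr, orig0);
}

int main(void)
{
	set_huge_pte(0x800000UL, 0xabc000UL);
	return 0;
}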
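
Note on the hugetlb_setup() hunk in init_64.c: the cross-call moves out from under ctx_alloc_lock. The lock is now taken with spin_lock_irq(), the need for a reload is merely recorded in need_context_reload, and on_each_cpu(context_reload, mm, 0) runs only after the unlock, because waiting for every other CPU to answer an IPI while holding a lock with IRQs disabled invites deadlock. A sketch of that defer-past-the-unlock pattern; the pthread lock and broadcast_reload() are illustrative stand-ins, not kernel APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ctx_val;

/* stand-in for on_each_cpu(context_reload, mm, 0) */
static void broadcast_reload(void)
{
	printf("reload context register on all CPUs\n");
}

static void set_ctx_bits(unsigned long bits)
{
	bool need_reload = false;

	pthread_mutex_lock(&ctx_lock);
	if ((ctx_val & bits) != bits) {
		ctx_val |= bits;
		need_reload = true;	/* record it; don't broadcast yet */
	}
	pthread_mutex_unlock(&ctx_lock);

	/* The broadcast waits on every other CPU, so it is issued only
	 * after the lock is dropped, exactly as in hugetlb_setup(). */
	if (need_reload)
		broadcast_reload();
}

int main(void)
{
	set_ctx_bits(0x4);	/* first call changes ctx and broadcasts */
	set_ctx_bits(0x4);	/* second call is a no-op */
	return 0;
}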
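
Note on the tlb.c and tsb.c hunks: a huge flag now travels from tlb_batch_add() down to flush_tsb_user_page(), enforcing two things. First, a TLB batch never mixes base-size and huge-page addresses: a size mismatch drains the pending batch before the new address is queued. Second, each flush walks only the TSB that can actually hold the entry, MM_TSB_BASE or MM_TSB_HUGE, instead of always walking the base TSB. (is_hugetlb_pte() disappears from init_64.c yet is still called from tlb.c, so the full patch presumably relocates it to a shared header; those hunks fall outside this arch/sparc/mm diff.) A sketch of the batch invariant below, with a stubbed flush and an illustrative capacity:

#include <stdbool.h>
#include <stdio.h>

#define BATCH_NR 192	/* illustrative capacity, not asserted from the patch */

struct tlb_batch {
	bool huge;	/* page size of every address queued below */
	unsigned long tlb_nr;
	unsigned long vaddrs[BATCH_NR];
};

/* stub standing in for flush_tlb_pending() */
static void flush_pending(struct tlb_batch *tb)
{
	printf("flushing %lu %s entries\n", tb->tlb_nr,
	       tb->huge ? "huge" : "base");
	tb->tlb_nr = 0;
}

static void batch_add_one(struct tlb_batch *tb, unsigned long vaddr, bool huge)
{
	unsigned long nr = tb->tlb_nr;

	if (nr == 0)
		tb->huge = huge;	/* first entry fixes the batch's size */

	if (tb->huge != huge) {		/* size changed: drain first */
		flush_pending(tb);
		tb->huge = huge;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= BATCH_NR)
		flush_pending(tb);
}

int main(void)
{
	struct tlb_batch tb = { 0 };

	batch_add_one(&tb, 0x10000, false);
	batch_add_one(&tb, 0x12000, false);
	batch_add_one(&tb, 0x800000, true);	/* forces a drain */
	flush_pending(&tb);
	return 0;
}

Draining on a size switch is what keeps flush_tsb_user() simple: tb->huge describes every address in vaddrs[], so a single flag selects the right TSB for the whole batch.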