author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
commit    | 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree      | fa581f6dc1c0596391690d1f67eceef3af8246dc /arch/powerpc/mm/pgtable_64.c
parent    | d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 107
1 file changed, 42 insertions(+), 65 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e92cb2146..cdf2123d4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -359,7 +359,7 @@ struct page *pud_page(pud_t pud)
 struct page *pmd_page(pmd_t pmd)
 {
         if (pmd_trans_huge(pmd) || pmd_huge(pmd))
-                return pfn_to_page(pmd_pfn(pmd));
+                return pte_page(pmd_pte(pmd));
         return virt_to_page(pmd_page_vaddr(pmd));
 }
 
@@ -604,55 +604,6 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 }
 
 /*
- * We mark the pmd splitting and invalidate all the hpte
- * entries for this hugepage.
- */
-void pmdp_splitting_flush(struct vm_area_struct *vma,
-                          unsigned long address, pmd_t *pmdp)
-{
-        unsigned long old, tmp;
-
-        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
-#ifdef CONFIG_DEBUG_VM
-        WARN_ON(!pmd_trans_huge(*pmdp));
-        assert_spin_locked(&vma->vm_mm->page_table_lock);
-#endif
-
-#ifdef PTE_ATOMIC_UPDATES
-
-        __asm__ __volatile__(
-        "1:     ldarx   %0,0,%3\n\
-                andi.   %1,%0,%6\n\
-                bne-    1b \n\
-                ori     %1,%0,%4 \n\
-                stdcx.  %1,0,%3 \n\
-                bne-    1b"
-        : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
-        : "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY)
-        : "cc" );
-#else
-        old = pmd_val(*pmdp);
-        *pmdp = __pmd(old | _PAGE_SPLITTING);
-#endif
-        /*
-         * If we didn't had the splitting flag set, go and flush the
-         * HPTE entries.
-         */
-        trace_hugepage_splitting(address, old);
-        if (!(old & _PAGE_SPLITTING)) {
-                /* We need to flush the hpte */
-                if (old & _PAGE_HASHPTE)
-                        hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
-        }
-        /*
-         * This ensures that generic code that rely on IRQ disabling
-         * to prevent a parallel THP split work as expected.
-         */
-        kick_all_cpus_sync();
-}
-
-/*
  * We want to put the pgtable in pmd and use pgtable for tracking
  * the base page size hptes
  */
@@ -695,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
         return pgtable;
 }
 
+void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                             unsigned long address, pmd_t *pmdp)
+{
+        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+        VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+
+        /*
+         * We can't mark the pmd none here, because that will cause a race
+         * against exit_mmap. We need to continue mark pmd TRANS HUGE, while
+         * we spilt, but at the same time we wan't rest of the ppc64 code
+         * not to insert hash pte on this, because we will be modifying
+         * the deposited pgtable in the caller of this function. Hence
+         * clear the _PAGE_USER so that we move the fault handling to
+         * higher level function and that will serialize against ptl.
+         * We need to flush existing hash pte entries here even though,
+         * the translation is still valid, because we will withdraw
+         * pgtable_t after this.
+         */
+        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+}
+
+
 /*
  * set a new huge pmd. We should not be called for updating
  * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -712,10 +685,20 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
         return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
+/*
+ * We use this to invalidate a pmdp entry before switching from a
+ * hugepte to regular pmd entry.
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
 {
         pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+
+        /*
+         * This ensures that generic code that rely on IRQ disabling
+         * to prevent a parallel THP split work as expected.
+         */
+        kick_all_cpus_sync();
 }
 
 /*
@@ -759,22 +742,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
 {
-        pmd_val(pmd) |= pgprot_val(pgprot);
-        return pmd;
+        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
 }
 
 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
 {
-        pmd_t pmd;
-        /*
-         * For a valid pte, we would have _PAGE_PRESENT always
-         * set. We use this to check THP page at pmd level.
-         * leaf pte for huge page, bottom two bits != 00
-         */
-        pmd_val(pmd) = pfn << PTE_RPN_SHIFT;
-        pmd_val(pmd) |= _PAGE_THP_HUGE;
-        pmd = pmd_set_protbits(pmd, pgprot);
-        return pmd;
+        unsigned long pmdv;
+
+        pmdv = pfn << PTE_RPN_SHIFT;
+        return pmd_set_protbits(__pmd(pmdv), pgprot);
 }
 
 pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
@@ -784,10 +760,11 @@ pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
 
 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
+        unsigned long pmdv;
 
-        pmd_val(pmd) &= _HPAGE_CHG_MASK;
-        pmd = pmd_set_protbits(pmd, newprot);
-        return pmd;
+        pmdv = pmd_val(pmd);
+        pmdv &= _HPAGE_CHG_MASK;
+        return pmd_set_protbits(__pmd(pmdv), newprot);
 }
 
 /*
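The last three hunks all make the same kind of change: pmd_set_protbits(), pfn_pmd() and pmd_modify() stop assigning through pmd_val() as if it were an lvalue and instead compute a plain unsigned long, wrapping the result back into a pmd_t with __pmd(). The sketch below is a minimal, standalone illustration of that pattern, not kernel code; pmd_t, pgprot_t, __pmd(), pmd_val() and pgprot_val() here are simplified stand-ins for the real powerpc header definitions.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types and accessors (illustration only). */
    typedef struct { unsigned long pmd; } pmd_t;
    typedef struct { unsigned long pgprot; } pgprot_t;

    static inline pmd_t __pmd(unsigned long val) { return (pmd_t){ val }; }
    static inline unsigned long pmd_val(pmd_t pmd) { return pmd.pmd; }
    static inline unsigned long pgprot_val(pgprot_t prot) { return prot.pgprot; }

    /*
     * Post-patch style: read the raw value, compute a new one, and wrap it
     * back up with __pmd().  pmd_val() is only ever read, never assigned to,
     * so the accessor no longer has to expand to something usable as an lvalue.
     */
    static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
    {
            return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
    }

    int main(void)
    {
            pmd_t pmd = __pmd(0x1000);
            pgprot_t prot = { 0x3 };

            pmd = pmd_set_protbits(pmd, prot);
            printf("pmd value: %#lx\n", pmd_val(pmd));
            return 0;
    }

Built with a C99 compiler, this should print pmd value: 0x1003 (0x1000 OR-ed with the protection bits 0x3).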