Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 74 ++++++++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 38 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d9ec1a5b3..87e11d8ad 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1022,7 +1022,9 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
+#if (defined(CONFIG_X86_64) || defined(CONFIG_S390)) && \
+ ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
+ defined(CONFIG_CMA))
static void destroy_compound_gigantic_page(struct page *page,
unsigned int order)
{
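
For context on the widened guard: runtime gigantic-page allocation carves the page out of physically contiguous memory via alloc_contig_range(), which is itself only built when (CONFIG_MEMORY_ISOLATION && CONFIG_COMPACTION) or CONFIG_CMA is set; the change merely admits CONFIG_S390 alongside CONFIG_X86_64 as a supported architecture. A minimal sketch of that dependency, with the function name and range-scan helper invented for illustration:

	static struct page *alloc_gigantic_sketch(unsigned long nr_pages)
	{
		/* hypothetical: find a free, suitably aligned PFN range */
		unsigned long pfn = find_free_aligned_range(nr_pages);

		/*
		 * alloc_contig_range() isolates and migrates whatever
		 * occupies the range, which is why the #if above demands
		 * isolation+compaction or CMA. (Three-argument form,
		 * as in kernels of this generation.)
		 */
		if (alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE))
			return NULL;
		return pfn_to_page(pfn);
	}
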
@@ -1446,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
list_del(&page->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
+ h->max_huge_pages--;
update_and_free_page(h, page);
}
spin_unlock(&hugetlb_lock);
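
The single added line above is the actual fix: a dissolved hugepage leaves the pool for good, so the pool ceiling must shrink together with the free counts, or nr_hugepages keeps reporting pages already returned to the buddy allocator. As a sketch of the invariant (field names as in struct hstate; the helper itself is hypothetical):

	static void account_dissolved_huge_page(struct hstate *h, int nid)
	{
		h->free_huge_pages--;		/* off the global free list */
		h->free_huge_pages_node[nid]--;	/* off this node's free list */
		h->max_huge_pages--;		/* pool is one page smaller */
	}
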
@@ -3181,7 +3184,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page)
{
- int force_flush = 0;
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *ptep;
@@ -3200,19 +3202,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
tlb_start_vma(tlb, vma);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
address = start;
-again:
for (; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, &address, ptep))
- goto unlock;
+ if (huge_pmd_unshare(mm, &address, ptep)) {
+ spin_unlock(ptl);
+ continue;
+ }
pte = huge_ptep_get(ptep);
- if (huge_pte_none(pte))
- goto unlock;
+ if (huge_pte_none(pte)) {
+ spin_unlock(ptl);
+ continue;
+ }
/*
* Migrating hugepage or HWPoisoned hugepage is already
@@ -3220,7 +3225,8 @@ again:
*/
if (unlikely(!pte_present(pte))) {
huge_pte_clear(mm, address, ptep);
- goto unlock;
+ spin_unlock(ptl);
+ continue;
}
page = pte_page(pte);
@@ -3230,9 +3236,10 @@ again:
* are about to unmap is the actual page of interest.
*/
if (ref_page) {
- if (page != ref_page)
- goto unlock;
-
+ if (page != ref_page) {
+ spin_unlock(ptl);
+ continue;
+ }
/*
* Mark the VMA as having unmapped its page so that
* future faults in this VMA will fail rather than
@@ -3248,30 +3255,14 @@ again:
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, true);
- force_flush = !__tlb_remove_page(tlb, page);
- if (force_flush) {
- address += sz;
- spin_unlock(ptl);
- break;
- }
- /* Bail out after unmapping reference page if supplied */
- if (ref_page) {
- spin_unlock(ptl);
- break;
- }
-unlock:
+
spin_unlock(ptl);
- }
- /*
- * mmu_gather ran out of room to batch pages, we break out of
- * the PTE lock to avoid doing the potential expensive TLB invalidate
- * and page-free while holding it.
- */
- if (force_flush) {
- force_flush = 0;
- tlb_flush_mmu(tlb);
- if (address < end && !ref_page)
- goto again;
+ tlb_remove_page_size(tlb, page, huge_page_size(h));
+ /*
+ * Bail out after unmapping reference page if supplied
+ */
+ if (ref_page)
+ break;
}
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
tlb_end_vma(tlb, vma);
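
The rewrite above works because tlb_remove_page_size() flushes the mmu_gather batch internally when it fills, so the caller no longer tracks force_flush or restarts the walk with goto again, and every skip case collapses to spin_unlock() plus continue. A self-contained illustration of that design move, every name invented, compilable as plain C:

	#include <stdio.h>

	#define BATCH_MAX 4

	static const char *batch[BATCH_MAX];
	static int batch_nr;

	static void flush_batch(void)	/* stand-in for tlb_flush_mmu() */
	{
		for (int i = 0; i < batch_nr; i++)
			printf("flush %s\n", batch[i]);
		batch_nr = 0;
	}

	/*
	 * New-style helper: flushes internally when the batch is full, so
	 * callers never break out, flush and re-enter the loop themselves.
	 */
	static void remove_page(const char *page)
	{
		batch[batch_nr++] = page;
		if (batch_nr == BATCH_MAX)
			flush_batch();
	}

	int main(void)
	{
		const char *pages[] = { "p0", "p1", "p2", "p3", "p4", "p5" };

		for (int i = 0; i < 6; i++)
			remove_page(pages[i]);
		flush_batch();		/* drain the final partial batch */
		return 0;
	}
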
@@ -3330,7 +3321,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
- mapping = file_inode(vma->vm_file)->i_mapping;
+ mapping = vma->vm_file->f_mapping;
/*
* Take the mapping lock for the duration of the table walk. As
@@ -3952,6 +3943,14 @@ same_page:
return i ? i : -EFAULT;
}
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
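
The #ifndef block gives every architecture a working default while letting one with stricter hugetlb TLB requirements override it; the opt-in happens in the arch header, before this file is compiled. A sketch of the pattern, with the arch-side function name hypothetical:

	/* in arch/<arch>/include/asm/hugetlb.h, for example: */
	#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
	#define flush_hugetlb_tlb_range(vma, start, end)	\
		my_arch_flush_hugetlb_tlb_range(vma, start, end)

	void my_arch_flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end);
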
@@ -4012,7 +4011,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk.
*/
- flush_tlb_range(vma, start, end);
+ flush_hugetlb_tlb_range(vma, start, end);
mmu_notifier_invalidate_range(mm, start, end);
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
@@ -4320,7 +4319,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
- BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+ BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
return pte;
}
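
The relaxed BUG_ON matters because a hugetlb PTE can be non-none yet non-present, e.g. a migration or hwpoison swap entry, and pte_huge() only means anything for present entries; the old form tripped on such entries, while the new one ignores them and still catches a present non-huge mapping where a huge one was expected. A truth-table model of the two predicates (hypothetical struct, not kernel code):

	struct pte_model { int none, present, huge; };

	static int old_bug(struct pte_model p)	/* !pte_none && !pte_huge */
	{
		return !p.none && !p.huge;
	}

	static int new_bug(struct pte_model p)	/* pte_present && !pte_huge */
	{
		return p.present && !p.huge;
	}

	/*
	 * Migration/hwpoison entry = { .none = 0, .present = 0, .huge = 0 }:
	 * old_bug() == 1 (spurious panic), new_bug() == 0 (tolerated).
	 */
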
@@ -4405,7 +4404,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
/*
* This function is called from memory failure code.
- * Assume the caller holds page lock of the head page.
*/
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{