author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
commit | 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree | e9891aa6c295060d065adffd610c4f49ecf884f3 /mm
parent | a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig | 40
-rw-r--r-- | mm/Makefile | 3
-rw-r--r-- | mm/backing-dev.c | 48
-rw-r--r-- | mm/bootmem.c | 7
-rw-r--r-- | mm/cma.c | 4
-rw-r--r-- | mm/compaction.c | 175
-rw-r--r-- | mm/debug.c | 4
-rw-r--r-- | mm/dmapool.c | 16
-rw-r--r-- | mm/early_ioremap.c | 35
-rw-r--r-- | mm/filemap.c | 11
-rw-r--r-- | mm/frame_vector.c | 230
-rw-r--r-- | mm/gup.c | 60
-rw-r--r-- | mm/huge_memory.c | 243
-rw-r--r-- | mm/hugetlb.c | 448
-rw-r--r-- | mm/hwpoison-inject.c | 7
-rw-r--r-- | mm/internal.h | 16
-rw-r--r-- | mm/kasan/Makefile | 2
-rw-r--r-- | mm/kasan/kasan.c | 3
-rw-r--r-- | mm/kasan/kasan_init.c | 152
-rw-r--r-- | mm/kmemleak.c | 24
-rw-r--r-- | mm/list_lru.c | 4
-rw-r--r-- | mm/maccess.c | 41
-rw-r--r-- | mm/madvise.c | 11
-rw-r--r-- | mm/memblock.c | 34
-rw-r--r-- | mm/memcontrol.c | 514
-rw-r--r-- | mm/memory-failure.c | 117
-rw-r--r-- | mm/memory.c | 72
-rw-r--r-- | mm/memory_hotplug.c | 14
-rw-r--r-- | mm/mempolicy.c | 11
-rw-r--r-- | mm/mempool.c | 3
-rw-r--r-- | mm/memtest.c | 27
-rw-r--r-- | mm/migrate.c | 37
-rw-r--r-- | mm/mlock.c | 3
-rw-r--r-- | mm/mmap.c | 130
-rw-r--r-- | mm/mmu_notifier.c | 17
-rw-r--r-- | mm/mprotect.c | 3
-rw-r--r-- | mm/mremap.c | 50
-rw-r--r-- | mm/nommu.c | 31
-rw-r--r-- | mm/oom_kill.c | 142
-rw-r--r-- | mm/page-writeback.c | 60
-rw-r--r-- | mm/page_alloc.c | 83
-rw-r--r-- | mm/page_ext.c | 4
-rw-r--r-- | mm/page_idle.c | 232
-rw-r--r-- | mm/page_io.c | 20
-rw-r--r-- | mm/page_isolation.c | 35
-rw-r--r-- | mm/percpu.c | 5
-rw-r--r-- | mm/readahead.c | 8
-rw-r--r-- | mm/rmap.c | 124
-rw-r--r-- | mm/shmem.c | 16
-rw-r--r-- | mm/slab.c | 15
-rw-r--r-- | mm/slab.h | 11
-rw-r--r-- | mm/slab_common.c | 28
-rw-r--r-- | mm/slob.c | 17
-rw-r--r-- | mm/slub.c | 206
-rw-r--r-- | mm/swap.c | 3
-rw-r--r-- | mm/swap_state.c | 37
-rw-r--r-- | mm/swapfile.c | 67
-rw-r--r-- | mm/userfaultfd.c | 308
-rw-r--r-- | mm/vmscan.c | 42
-rw-r--r-- | mm/zbud.c | 10
-rw-r--r-- | mm/zpool.c | 51
-rw-r--r-- | mm/zsmalloc.c | 235
-rw-r--r-- | mm/zswap.c | 761
64 files changed, 3639 insertions, 1535 deletions
diff --git a/mm/Kconfig b/mm/Kconfig index 9156dd388..3f2ecb5d9 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -299,15 +299,9 @@ config BOUNCE # On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often # have more than 4GB of memory, but we don't currently use the IOTLB to present # a 32-bit address to OHCI. So we need to use a bounce pool instead. -# -# We also use the bounce pool to provide stable page writes for jbd. jbd -# initiates buffer writeback without locking the page or setting PG_writeback, -# and fixing that behavior (a second time; jbd2 doesn't have this problem) is -# a major rework effort. Instead, use the bounce buffer to snapshot pages -# (until jbd goes away). The only jbd user is ext3. config NEED_BOUNCE_POOL bool - default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD) + default y if TILE && USB_OHCI_HCD config NR_QUICK int @@ -680,3 +674,35 @@ config DEFERRED_STRUCT_PAGE_INIT when kswapd starts. This has a potential performance impact on processes running early in the lifetime of the systemm until kswapd finishes the initialisation. + +config IDLE_PAGE_TRACKING + bool "Enable idle page tracking" + depends on SYSFS && MMU + select PAGE_EXTENSION if !64BIT + help + This feature allows to estimate the amount of user pages that have + not been touched during a given period of time. This information can + be useful to tune memory cgroup limits and/or for job placement + within a compute cluster. + + See Documentation/vm/idle_page_tracking.txt for more details. + +config ZONE_DEVICE + bool "Device memory (pmem, etc...) hotplug support" if EXPERT + default !ZONE_DMA + depends on !ZONE_DMA + depends on MEMORY_HOTPLUG + depends on MEMORY_HOTREMOVE + depends on X86_64 #arch_add_memory() comprehends device memory + + help + Device memory hotplug support allows for establishing pmem, + or other device driver discovered memory regions, in the + memmap. This allows pfn_to_page() lookups of otherwise + "device-physical" addresses which is needed for using a DAX + mapping in an O_DIRECT operation, among other things. + + If FS_DAX is enabled, then say Y. 
+ +config FRAME_VECTOR + bool diff --git a/mm/Makefile b/mm/Makefile index 6218349e5..f98e6e279 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -79,3 +79,6 @@ obj-$(CONFIG_CMA) += cma.o obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o +obj-$(CONFIG_USERFAULTFD) += userfaultfd.o +obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o +obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o diff --git a/mm/backing-dev.c b/mm/backing-dev.c index dac5bf593..619984fc0 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -55,13 +55,13 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0; spin_lock(&wb->list_lock); - list_for_each_entry(inode, &wb->b_dirty, i_wb_list) + list_for_each_entry(inode, &wb->b_dirty, i_io_list) nr_dirty++; - list_for_each_entry(inode, &wb->b_io, i_wb_list) + list_for_each_entry(inode, &wb->b_io, i_io_list) nr_io++; - list_for_each_entry(inode, &wb->b_more_io, i_wb_list) + list_for_each_entry(inode, &wb->b_more_io, i_io_list) nr_more_io++; - list_for_each_entry(inode, &wb->b_dirty_time, i_wb_list) + list_for_each_entry(inode, &wb->b_dirty_time, i_io_list) if (inode->i_state & I_DIRTY_TIME) nr_dirty_time++; spin_unlock(&wb->list_lock); @@ -480,6 +480,10 @@ static void cgwb_release_workfn(struct work_struct *work) release_work); struct backing_dev_info *bdi = wb->bdi; + spin_lock_irq(&cgwb_lock); + list_del_rcu(&wb->bdi_node); + spin_unlock_irq(&cgwb_lock); + wb_shutdown(wb); css_put(wb->memcg_css); @@ -523,7 +527,7 @@ static int cgwb_create(struct backing_dev_info *bdi, int ret = 0; memcg = mem_cgroup_from_css(memcg_css); - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &blkio_cgrp_subsys); + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); blkcg = css_to_blkcg(blkcg_css); memcg_cgwb_list = mem_cgroup_cgwb_list(memcg); blkcg_cgwb_list = &blkcg->cgwb_list; @@ -575,6 +579,7 @@ static int cgwb_create(struct backing_dev_info *bdi, ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); if (!ret) { atomic_inc(&bdi->usage_cnt); + list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); css_get(memcg_css); @@ -645,7 +650,7 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, /* see whether the blkcg association has changed */ blkcg_css = cgroup_get_e_css(memcg_css->cgroup, - &blkio_cgrp_subsys); + &io_cgrp_subsys); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; @@ -676,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { struct radix_tree_iter iter; - struct bdi_writeback_congested *congested, *congested_n; + struct rb_node *rbn; void **slot; WARN_ON(test_bit(WB_registered, &bdi->wb.state)); @@ -686,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); - rbtree_postorder_for_each_entry_safe(congested, congested_n, - &bdi->cgwb_congested_tree, rb_node) { - rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree); + while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { + struct bdi_writeback_congested *congested = + rb_entry(rbn, struct bdi_writeback_congested, rb_node); + + rb_erase(rbn, &bdi->cgwb_congested_tree); congested->bdi = NULL; /* mark @congested unlinked */ } @@ -764,15 +771,22 @@ static void cgwb_bdi_destroy(struct backing_dev_info 
*bdi) { } int bdi_init(struct backing_dev_info *bdi) { + int ret; + bdi->dev = NULL; bdi->min_ratio = 0; bdi->max_ratio = 100; bdi->max_prop_frac = FPROP_FRAC_BASE; INIT_LIST_HEAD(&bdi->bdi_list); + INIT_LIST_HEAD(&bdi->wb_list); init_waitqueue_head(&bdi->wb_waitq); - return cgwb_bdi_init(bdi); + ret = cgwb_bdi_init(bdi); + + list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); + + return ret; } EXPORT_SYMBOL(bdi_init); @@ -823,7 +837,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi) synchronize_rcu_expedited(); } -void bdi_destroy(struct backing_dev_info *bdi) +void bdi_unregister(struct backing_dev_info *bdi) { /* make sure nobody finds us on the bdi_list anymore */ bdi_remove_from_list(bdi); @@ -835,9 +849,19 @@ void bdi_destroy(struct backing_dev_info *bdi) device_unregister(bdi->dev); bdi->dev = NULL; } +} +void bdi_exit(struct backing_dev_info *bdi) +{ + WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); } + +void bdi_destroy(struct backing_dev_info *bdi) +{ + bdi_unregister(bdi); + bdi_exit(bdi); +} EXPORT_SYMBOL(bdi_destroy); /* diff --git a/mm/bootmem.c b/mm/bootmem.c index a23dd1934..3b6380784 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -236,6 +236,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) count += pages; while (pages--) __free_pages_bootmem(page++, cur++, 0); + bdata->node_bootmem_map = NULL; bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); @@ -294,6 +295,9 @@ static void __init __free(bootmem_data_t *bdata, sidx + bdata->node_min_pfn, eidx + bdata->node_min_pfn); + if (WARN_ON(bdata->node_bootmem_map == NULL)) + return; + if (bdata->hint_idx > sidx) bdata->hint_idx = sidx; @@ -314,6 +318,9 @@ static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx, eidx + bdata->node_min_pfn, flags); + if (WARN_ON(bdata->node_bootmem_map == NULL)) + return 0; + for (idx = sidx; idx < eidx; idx++) if (test_and_set_bit(idx, bdata->node_bootmem_map)) { if (exclusive) { @@ -361,7 +361,7 @@ err: * This function allocates part of contiguous memory on specific * contiguous memory area. 
*/ -struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) +struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) { unsigned long mask, offset, pfn, start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; @@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) if (!cma || !cma->count) return NULL; - pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, + pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, count, align); if (!count) diff --git a/mm/compaction.c b/mm/compaction.c index 018f08da9..c5c627aae 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -207,6 +207,13 @@ static inline bool isolation_suitable(struct compact_control *cc, return !get_pageblock_skip(page); } +static void reset_cached_positions(struct zone *zone) +{ + zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; + zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; + zone->compact_cached_free_pfn = zone_end_pfn(zone); +} + /* * This function is called to clear all cached information on pageblocks that * should be skipped for page isolation when the migrate and free page scanner @@ -218,9 +225,6 @@ static void __reset_isolation_suitable(struct zone *zone) unsigned long end_pfn = zone_end_pfn(zone); unsigned long pfn; - zone->compact_cached_migrate_pfn[0] = start_pfn; - zone->compact_cached_migrate_pfn[1] = start_pfn; - zone->compact_cached_free_pfn = end_pfn; zone->compact_blockskip_flush = false; /* Walk the zone and mark every pageblock as suitable for isolation */ @@ -238,6 +242,8 @@ static void __reset_isolation_suitable(struct zone *zone) clear_pageblock_skip(page); } + + reset_cached_positions(zone); } void reset_isolation_suitable(pg_data_t *pgdat) @@ -431,6 +437,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (!valid_page) valid_page = page; + + /* + * For compound pages such as THP and hugetlbfs, we can save + * potentially a lot of iterations if we skip them at once. + * The check is racy, but we can consider only valid values + * and the only danger is skipping too much. + */ + if (PageCompound(page)) { + unsigned int comp_order = compound_order(page); + + if (likely(comp_order < MAX_ORDER)) { + blockpfn += (1UL << comp_order) - 1; + cursor += (1UL << comp_order) - 1; + } + + goto isolate_fail; + } + if (!PageBuddy(page)) goto isolate_fail; @@ -490,6 +514,13 @@ isolate_fail: } + /* + * There is a tiny chance that we have read bogus compound_order(), + * so be careful to not go outside of the pageblock. + */ + if (unlikely(blockpfn > end_pfn)) + blockpfn = end_pfn; + trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, nr_scanned, total_isolated); @@ -674,6 +705,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { + bool is_lru; + /* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort async compaction @@ -717,36 +750,35 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * It's possible to migrate LRU pages and balloon pages * Skip any other type of page */ - if (!PageLRU(page)) { + is_lru = PageLRU(page); + if (!is_lru) { if (unlikely(balloon_page_movable(page))) { if (balloon_page_isolate(page)) { /* Successfully isolated */ goto isolate_success; } } - continue; } /* - * PageLRU is set. 
lru_lock normally excludes isolation - * splitting and collapsing (collapsing has already happened - * if PageLRU is set) but the lock is not necessarily taken - * here and it is wasteful to take it just to check transhuge. - * Check TransHuge without lock and skip the whole pageblock if - * it's either a transhuge or hugetlbfs page, as calling - * compound_order() without preventing THP from splitting the - * page underneath us may return surprising results. + * Regardless of being on LRU, compound pages such as THP and + * hugetlbfs are not to be compacted. We can potentially save + * a lot of iterations if we skip them at once. The check is + * racy, but we can consider only valid values and the only + * danger is skipping too much. */ - if (PageTransHuge(page)) { - if (!locked) - low_pfn = ALIGN(low_pfn + 1, - pageblock_nr_pages) - 1; - else - low_pfn += (1 << compound_order(page)) - 1; + if (PageCompound(page)) { + unsigned int comp_order = compound_order(page); + + if (likely(comp_order < MAX_ORDER)) + low_pfn += (1UL << comp_order) - 1; continue; } + if (!is_lru) + continue; + /* * Migration will fail if an anonymous page is pinned in memory, * so avoid taking lru_lock and isolating it unnecessarily in an @@ -763,11 +795,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (!locked) break; - /* Recheck PageLRU and PageTransHuge under lock */ + /* Recheck PageLRU and PageCompound under lock */ if (!PageLRU(page)) continue; - if (PageTransHuge(page)) { - low_pfn += (1 << compound_order(page)) - 1; + + /* + * Page become compound since the non-locked check, + * and it's on LRU. It can only be a THP so the order + * is safe to read and it's 0 for tail pages. + */ + if (unlikely(PageCompound(page))) { + low_pfn += (1UL << compound_order(page)) - 1; continue; } } @@ -778,7 +816,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (__isolate_lru_page(page, isolate_mode) != 0) continue; - VM_BUG_ON_PAGE(PageTransCompound(page), page); + VM_BUG_ON_PAGE(PageCompound(page), page); /* Successfully isolated */ del_page_from_lru_list(page, lruvec, page_lru(page)); @@ -898,6 +936,16 @@ static bool suitable_migration_target(struct page *page) } /* + * Test whether the free scanner has reached the same or lower pageblock than + * the migration scanner, and compaction should thus terminate. + */ +static inline bool compact_scanners_met(struct compact_control *cc) +{ + return (cc->free_pfn >> pageblock_order) + <= (cc->migrate_pfn >> pageblock_order); +} + +/* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. */ @@ -933,8 +981,7 @@ static void isolate_freepages(struct compact_control *cc) * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. */ - for (; block_start_pfn >= low_pfn && - cc->nr_migratepages > cc->nr_freepages; + for (; block_start_pfn >= low_pfn; block_end_pfn = block_start_pfn, block_start_pfn -= pageblock_nr_pages, isolate_start_pfn = block_start_pfn) { @@ -966,6 +1013,8 @@ static void isolate_freepages(struct compact_control *cc) block_end_pfn, freelist, false); /* + * If we isolated enough freepages, or aborted due to async + * compaction being contended, terminate the loop. * Remember where the free scanner should restart next time, * which is where isolate_freepages_block() left off. 
* But if it scanned the whole pageblock, isolate_start_pfn @@ -974,27 +1023,31 @@ static void isolate_freepages(struct compact_control *cc) * In that case we will however want to restart at the start * of the previous pageblock. */ - cc->free_pfn = (isolate_start_pfn < block_end_pfn) ? - isolate_start_pfn : - block_start_pfn - pageblock_nr_pages; - - /* - * isolate_freepages_block() might have aborted due to async - * compaction being contended - */ - if (cc->contended) + if ((cc->nr_freepages >= cc->nr_migratepages) + || cc->contended) { + if (isolate_start_pfn >= block_end_pfn) + isolate_start_pfn = + block_start_pfn - pageblock_nr_pages; break; + } else { + /* + * isolate_freepages_block() should not terminate + * prematurely unless contended, or isolated enough + */ + VM_BUG_ON(isolate_start_pfn < block_end_pfn); + } } /* split_free_page does not map the pages */ map_pages(freelist); /* - * If we crossed the migrate scanner, we want to keep it that way - * so that compact_finished() may detect this + * Record where the free scanner will restart next time. Either we + * broke from the loop and set isolate_start_pfn based on the last + * call to isolate_freepages_block(), or we met the migration scanner + * and the loop terminated due to isolate_start_pfn < low_pfn */ - if (block_start_pfn < low_pfn) - cc->free_pfn = cc->migrate_pfn; + cc->free_pfn = isolate_start_pfn; } /* @@ -1062,6 +1115,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, struct compact_control *cc) { unsigned long low_pfn, end_pfn; + unsigned long isolate_start_pfn; struct page *page; const isolate_mode_t isolate_mode = (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | @@ -1110,6 +1164,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, continue; /* Perform the isolation */ + isolate_start_pfn = low_pfn; low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, isolate_mode); @@ -1119,6 +1174,15 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, } /* + * Record where we could have freed pages by migration and not + * yet flushed them to buddy allocator. + * - this is the lowest page that could have been isolated and + * then freed by migration. + */ + if (cc->nr_migratepages && !cc->last_migrated_pfn) + cc->last_migrated_pfn = isolate_start_pfn; + + /* * Either we isolated something and proceed with migration. Or * we failed and compact_zone should decide if we should * continue or not. @@ -1127,12 +1191,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, } acct_isolated(zone, cc); - /* - * Record where migration scanner will be restarted. If we end up in - * the same pageblock as the free scanner, make the scanners fully - * meet so that compact_finished() terminates compaction. - */ - cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn; + /* Record where migration scanner will be restarted. */ + cc->migrate_pfn = low_pfn; return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; } @@ -1147,11 +1207,9 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc, return COMPACT_PARTIAL; /* Compaction run completes if the migrate and free scanner meet */ - if (cc->free_pfn <= cc->migrate_pfn) { + if (compact_scanners_met(cc)) { /* Let the next compaction start anew. 
*/ - zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; - zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; - zone->compact_cached_free_pfn = zone_end_pfn(zone); + reset_cached_positions(zone); /* * Mark that the PG_migrate_skip information should be cleared @@ -1295,7 +1353,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) unsigned long end_pfn = zone_end_pfn(zone); const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); const bool sync = cc->mode != MIGRATE_ASYNC; - unsigned long last_migrated_pfn = 0; ret = compaction_suitable(zone, cc->order, cc->alloc_flags, cc->classzone_idx); @@ -1333,6 +1390,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; } + cc->last_migrated_pfn = 0; trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn, sync); @@ -1342,7 +1400,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) while ((ret = compact_finished(zone, cc, migratetype)) == COMPACT_CONTINUE) { int err; - unsigned long isolate_start_pfn = cc->migrate_pfn; switch (isolate_migratepages(zone, cc)) { case ISOLATE_ABORT: @@ -1376,22 +1433,12 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) * migrate_pages() may return -ENOMEM when scanners meet * and we want compact_finished() to detect it */ - if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { + if (err == -ENOMEM && !compact_scanners_met(cc)) { ret = COMPACT_PARTIAL; goto out; } } - /* - * Record where we could have freed pages by migration and not - * yet flushed them to buddy allocator. We use the pfn that - * isolate_migratepages() started from in this loop iteration - * - this is the lowest page that could have been isolated and - * then freed by migration. - */ - if (!last_migrated_pfn) - last_migrated_pfn = isolate_start_pfn; - check_drain: /* * Has the migration scanner moved away from the previous @@ -1400,18 +1447,18 @@ check_drain: * compact_finished() can detect immediately if allocation * would succeed. 
*/ - if (cc->order > 0 && last_migrated_pfn) { + if (cc->order > 0 && cc->last_migrated_pfn) { int cpu; unsigned long current_block_start = cc->migrate_pfn & ~((1UL << cc->order) - 1); - if (last_migrated_pfn < current_block_start) { + if (cc->last_migrated_pfn < current_block_start) { cpu = get_cpu(); lru_add_drain_cpu(cpu); drain_local_pages(zone); put_cpu(); /* No more flushing until we migrate again */ - last_migrated_pfn = 0; + cc->last_migrated_pfn = 0; } } diff --git a/mm/debug.c b/mm/debug.c index 689f5e4b1..2d106d7f6 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -54,6 +54,10 @@ static const struct trace_print_flags pageflag_names[] = { {1UL << PG_toi_cbw, "toi_cbw" }, {1UL << PG_toi_dirty, "toi_dirty" }, #endif +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) + {1UL << PG_young, "young" }, + {1UL << PG_idle, "idle" }, +#endif }; static void dump_flags(unsigned long flags, diff --git a/mm/dmapool.c b/mm/dmapool.c index fd5fe4342..312a716fa 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -242,7 +242,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) return page; } -static inline int is_page_busy(struct dma_page *page) +static inline bool is_page_busy(struct dma_page *page) { return page->in_use != 0; } @@ -271,6 +271,9 @@ void dma_pool_destroy(struct dma_pool *pool) { bool empty = false; + if (unlikely(!pool)) + return; + mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); list_del(&pool->pools); @@ -334,7 +337,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ spin_unlock_irqrestore(&pool->lock, flags); - page = pool_alloc_page(pool, mem_flags); + page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); if (!page) return NULL; @@ -372,9 +375,14 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, break; } } - memset(retval, POOL_POISON_ALLOCATED, pool->size); + if (!(mem_flags & __GFP_ZERO)) + memset(retval, POOL_POISON_ALLOCATED, pool->size); #endif spin_unlock_irqrestore(&pool->lock, flags); + + if (mem_flags & __GFP_ZERO) + memset(retval, 0, pool->size); + return retval; } EXPORT_SYMBOL(dma_pool_alloc); @@ -386,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; - if (dma < (page->dma + pool->allocation)) + if ((dma - page->dma) < pool->allocation) return page; } return NULL; diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index e10ccd299..17ae14b5a 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -15,6 +15,7 @@ #include <linux/mm.h> #include <linux/vmalloc.h> #include <asm/fixmap.h> +#include <asm/early_ioremap.h> #ifdef CONFIG_MMU static int early_ioremap_debug __initdata; @@ -217,6 +218,35 @@ early_memremap(resource_size_t phys_addr, unsigned long size) return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_NORMAL); } +#ifdef FIXMAP_PAGE_RO +void __init * +early_memremap_ro(resource_size_t phys_addr, unsigned long size) +{ + return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_RO); +} +#endif + +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = src & ~PAGE_MASK; + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + p = early_memremap(src & PAGE_MASK, clen + slop); + memcpy(dest, p + 
slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + #else /* CONFIG_MMU */ void __init __iomem * @@ -231,6 +261,11 @@ early_memremap(resource_size_t phys_addr, unsigned long size) { return (void *)phys_addr; } +void __init * +early_memremap_ro(resource_size_t phys_addr, unsigned long size) +{ + return (void *)phys_addr; +} void __init early_iounmap(void __iomem *addr, unsigned long size) { diff --git a/mm/filemap.c b/mm/filemap.c index 128f18fc4..7bbc37290 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -674,7 +674,7 @@ struct page *__page_cache_alloc(gfp_t gfp) do { cpuset_mems_cookie = read_mems_allowed_begin(); n = cpuset_mem_spread_node(); - page = alloc_pages_exact_node(n, gfp, 0); + page = __alloc_pages_node(n, gfp, 0); } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); return page; @@ -2488,6 +2488,11 @@ again: break; } + if (fatal_signal_pending(current)) { + status = -EINTR; + break; + } + status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status < 0)) @@ -2525,10 +2530,6 @@ again: written += copied; balance_dirty_pages_ratelimited(mapping); - if (fatal_signal_pending(current)) { - status = -EINTR; - break; - } } while (iov_iter_count(i)); return written ? written : status; diff --git a/mm/frame_vector.c b/mm/frame_vector.c new file mode 100644 index 000000000..cdabcb93c --- /dev/null +++ b/mm/frame_vector.c @@ -0,0 +1,230 @@ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/sched.h> + +/* + * get_vaddr_frames() - map virtual addresses to pfns + * @start: starting user address + * @nr_frames: number of pages / pfns from start to map + * @write: whether pages will be written to by the caller + * @force: whether to force write access even if user mapping is + * readonly. See description of the same argument of + get_user_pages(). + * @vec: structure which receives pages / pfns of the addresses mapped. + * It should have space for at least nr_frames entries. + * + * This function maps virtual addresses from @start and fills @vec structure + * with page frame numbers or page pointers to corresponding pages (choice + * depends on the type of the vma underlying the virtual address). If @start + * belongs to a normal vma, the function grabs reference to each of the pages + * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't + * touch page structures and the caller must make sure pfns aren't reused for + * anything else while he is using them. + * + * The function returns number of pages mapped which may be less than + * @nr_frames. In particular we stop mapping if there are more vmas of + * different type underlying the specified range of virtual addresses. + * When the function isn't able to map a single page, it returns error. + * + * This function takes care of grabbing mmap_sem as necessary. 
+ */ +int get_vaddr_frames(unsigned long start, unsigned int nr_frames, + bool write, bool force, struct frame_vector *vec) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int ret = 0; + int err; + int locked; + + if (nr_frames == 0) + return 0; + + if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) + nr_frames = vec->nr_allocated; + + down_read(&mm->mmap_sem); + locked = 1; + vma = find_vma_intersection(mm, start, start + 1); + if (!vma) { + ret = -EFAULT; + goto out; + } + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { + vec->got_ref = true; + vec->is_pfns = false; + ret = get_user_pages_locked(current, mm, start, nr_frames, + write, force, (struct page **)(vec->ptrs), &locked); + goto out; + } + + vec->got_ref = false; + vec->is_pfns = true; + do { + unsigned long *nums = frame_vector_pfns(vec); + + while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { + err = follow_pfn(vma, start, &nums[ret]); + if (err) { + if (ret == 0) + ret = err; + goto out; + } + start += PAGE_SIZE; + ret++; + } + /* + * We stop if we have enough pages or if VMA doesn't completely + * cover the tail page. + */ + if (ret >= nr_frames || start < vma->vm_end) + break; + vma = find_vma_intersection(mm, start, start + 1); + } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); +out: + if (locked) + up_read(&mm->mmap_sem); + if (!ret) + ret = -EFAULT; + if (ret > 0) + vec->nr_frames = ret; + return ret; +} +EXPORT_SYMBOL(get_vaddr_frames); + +/** + * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired + * them + * @vec: frame vector to put + * + * Drop references to pages if get_vaddr_frames() acquired them. We also + * invalidate the frame vector so that it is prepared for the next call into + * get_vaddr_frames(). + */ +void put_vaddr_frames(struct frame_vector *vec) +{ + int i; + struct page **pages; + + if (!vec->got_ref) + goto out; + pages = frame_vector_pages(vec); + /* + * frame_vector_pages() might needed to do a conversion when + * get_vaddr_frames() got pages but vec was later converted to pfns. + * But it shouldn't really fail to convert pfns back... + */ + if (WARN_ON(IS_ERR(pages))) + goto out; + for (i = 0; i < vec->nr_frames; i++) + put_page(pages[i]); + vec->got_ref = false; +out: + vec->nr_frames = 0; +} +EXPORT_SYMBOL(put_vaddr_frames); + +/** + * frame_vector_to_pages - convert frame vector to contain page pointers + * @vec: frame vector to convert + * + * Convert @vec to contain array of page pointers. If the conversion is + * successful, return 0. Otherwise return an error. Note that we do not grab + * page references for the page structures. + */ +int frame_vector_to_pages(struct frame_vector *vec) +{ + int i; + unsigned long *nums; + struct page **pages; + + if (!vec->is_pfns) + return 0; + nums = frame_vector_pfns(vec); + for (i = 0; i < vec->nr_frames; i++) + if (!pfn_valid(nums[i])) + return -EINVAL; + pages = (struct page **)nums; + for (i = 0; i < vec->nr_frames; i++) + pages[i] = pfn_to_page(nums[i]); + vec->is_pfns = false; + return 0; +} +EXPORT_SYMBOL(frame_vector_to_pages); + +/** + * frame_vector_to_pfns - convert frame vector to contain pfns + * @vec: frame vector to convert + * + * Convert @vec to contain array of pfns. 
+ */ +void frame_vector_to_pfns(struct frame_vector *vec) +{ + int i; + unsigned long *nums; + struct page **pages; + + if (vec->is_pfns) + return; + pages = (struct page **)(vec->ptrs); + nums = (unsigned long *)pages; + for (i = 0; i < vec->nr_frames; i++) + nums[i] = page_to_pfn(pages[i]); + vec->is_pfns = true; +} +EXPORT_SYMBOL(frame_vector_to_pfns); + +/** + * frame_vector_create() - allocate & initialize structure for pinned pfns + * @nr_frames: number of pfns slots we should reserve + * + * Allocate and initialize struct pinned_pfns to be able to hold @nr_pfns + * pfns. + */ +struct frame_vector *frame_vector_create(unsigned int nr_frames) +{ + struct frame_vector *vec; + int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames; + + if (WARN_ON_ONCE(nr_frames == 0)) + return NULL; + /* + * This is absurdly high. It's here just to avoid strange effects when + * arithmetics overflows. + */ + if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2)) + return NULL; + /* + * Avoid higher order allocations, use vmalloc instead. It should + * be rare anyway. + */ + if (size <= PAGE_SIZE) + vec = kmalloc(size, GFP_KERNEL); + else + vec = vmalloc(size); + if (!vec) + return NULL; + vec->nr_allocated = nr_frames; + vec->nr_frames = 0; + return vec; +} +EXPORT_SYMBOL(frame_vector_create); + +/** + * frame_vector_destroy() - free memory allocated to carry frame vector + * @vec: Frame vector to free + * + * Free structure allocated by frame_vector_create() to carry frames. + */ +void frame_vector_destroy(struct frame_vector *vec) +{ + /* Make sure put_vaddr_frames() got called properly... */ + VM_BUG_ON(vec->nr_frames > 0); + kvfree(vec); +} +EXPORT_SYMBOL(frame_vector_destroy); @@ -12,7 +12,9 @@ #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> + #include <asm/pgtable.h> +#include <asm/tlbflush.h> #include "internal.h" @@ -32,6 +34,30 @@ static struct page *no_page_table(struct vm_area_struct *vma, return NULL; } +static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, + pte_t *pte, unsigned int flags) +{ + /* No page to get reference */ + if (flags & FOLL_GET) + return -EFAULT; + + if (flags & FOLL_TOUCH) { + pte_t entry = *pte; + + if (flags & FOLL_WRITE) + entry = pte_mkdirty(entry); + entry = pte_mkyoung(entry); + + if (!pte_same(*pte, entry)) { + set_pte_at(vma->vm_mm, address, pte, entry); + update_mmu_cache(vma, address, pte); + } + } + + /* Proper page table entry exists, but no corresponding struct page */ + return -EEXIST; +} + static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) { @@ -73,10 +99,21 @@ retry: page = vm_normal_page(vma, address, pte); if (unlikely(!page)) { - if ((flags & FOLL_DUMP) || - !is_zero_pfn(pte_pfn(pte))) - goto bad_page; - page = pte_page(pte); + if (flags & FOLL_DUMP) { + /* Avoid special (like zero) pages in core dumps */ + page = ERR_PTR(-EFAULT); + goto out; + } + + if (is_zero_pfn(pte_pfn(pte))) { + page = pte_page(pte); + } else { + int ret; + + ret = follow_pfn_pte(vma, address, ptep, flags); + page = ERR_PTR(ret); + goto out; + } } if (flags & FOLL_GET) @@ -114,12 +151,9 @@ retry: unlock_page(page); } } +out: pte_unmap_unlock(ptep, ptl); return page; -bad_page: - pte_unmap_unlock(ptep, ptl); - return ERR_PTR(-EFAULT); - no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) @@ -489,9 +523,15 @@ retry: goto next_page; } BUG(); - } - if (IS_ERR(page)) + } else if (PTR_ERR(page) == -EEXIST) { + /* + * Proper page table entry 
exists, but no corresponding + * struct page. + */ + goto next_page; + } else if (IS_ERR(page)) { return i ? i : PTR_ERR(page); + } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 097c7a4bf..bbac913f9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -16,6 +16,7 @@ #include <linux/swap.h> #include <linux/shrinker.h> #include <linux/mm_inline.h> +#include <linux/dax.h> #include <linux/kthread.h> #include <linux/khugepaged.h> #include <linux/freezer.h> @@ -23,6 +24,8 @@ #include <linux/pagemap.h> #include <linux/migrate.h> #include <linux/hashtable.h> +#include <linux/userfaultfd_k.h> +#include <linux/page_idle.h> #include <asm/tlb.h> #include <asm/pgalloc.h> @@ -104,7 +107,7 @@ static struct khugepaged_scan khugepaged_scan = { }; -static int set_recommended_min_free_kbytes(void) +static void set_recommended_min_free_kbytes(void) { struct zone *zone; int nr_zones = 0; @@ -139,7 +142,6 @@ static int set_recommended_min_free_kbytes(void) min_free_kbytes = recommended_min; } setup_per_zone_wmarks(); - return 0; } static int start_stop_khugepaged(void) @@ -171,12 +173,7 @@ fail: static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; -static inline bool is_huge_zero_pmd(pmd_t pmd) -{ - return is_huge_zero_page(pmd_page(pmd)); -} - -static struct page *get_huge_zero_page(void) +struct page *get_huge_zero_page(void) { struct page *zero_page; retry: @@ -716,21 +713,27 @@ static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long haddr, pmd_t *pmd, - struct page *page, gfp_t gfp) + unsigned long address, pmd_t *pmd, + struct page *page, gfp_t gfp, + unsigned int flags) { struct mem_cgroup *memcg; pgtable_t pgtable; spinlock_t *ptl; + unsigned long haddr = address & HPAGE_PMD_MASK; VM_BUG_ON_PAGE(!PageCompound(page), page); - if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) - return VM_FAULT_OOM; + if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) { + put_page(page); + count_vm_event(THP_FAULT_FALLBACK); + return VM_FAULT_FALLBACK; + } pgtable = pte_alloc_one(mm, haddr); if (unlikely(!pgtable)) { mem_cgroup_cancel_charge(page, memcg); + put_page(page); return VM_FAULT_OOM; } @@ -750,6 +753,21 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, pte_free(mm, pgtable); } else { pmd_t entry; + + /* Deliver the page fault to userland */ + if (userfaultfd_missing(vma)) { + int ret; + + spin_unlock(ptl); + mem_cgroup_cancel_charge(page, memcg); + put_page(page); + pte_free(mm, pgtable); + ret = handle_userfault(vma, address, flags, + VM_UFFD_MISSING); + VM_BUG_ON(ret & VM_FAULT_FALLBACK); + return ret; + } + entry = mk_huge_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); page_add_new_anon_rmap(page, vma, haddr); @@ -760,6 +778,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); atomic_long_inc(&mm->nr_ptes); spin_unlock(ptl); + count_vm_event(THP_FAULT_ALLOC); } return 0; @@ -806,6 +825,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, pgtable_t pgtable; struct page *zero_page; bool set; + int ret; pgtable = pte_alloc_one(mm, haddr); if (unlikely(!pgtable)) return VM_FAULT_OOM; @@ -816,14 +836,28 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_FALLBACK; } ptl = pmd_lock(mm, pmd); - set = 
set_huge_zero_page(pgtable, mm, vma, haddr, pmd, - zero_page); - spin_unlock(ptl); + ret = 0; + set = false; + if (pmd_none(*pmd)) { + if (userfaultfd_missing(vma)) { + spin_unlock(ptl); + ret = handle_userfault(vma, address, flags, + VM_UFFD_MISSING); + VM_BUG_ON(ret & VM_FAULT_FALLBACK); + } else { + set_huge_zero_page(pgtable, mm, vma, + haddr, pmd, + zero_page); + spin_unlock(ptl); + set = true; + } + } else + spin_unlock(ptl); if (!set) { pte_free(mm, pgtable); put_huge_zero_page(); } - return 0; + return ret; } gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); @@ -831,14 +865,51 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } - if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) { - put_page(page); - count_vm_event(THP_FAULT_FALLBACK); - return VM_FAULT_FALLBACK; + return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, + flags); +} + +static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write) +{ + struct mm_struct *mm = vma->vm_mm; + pmd_t entry; + spinlock_t *ptl; + + ptl = pmd_lock(mm, pmd); + if (pmd_none(*pmd)) { + entry = pmd_mkhuge(pfn_pmd(pfn, prot)); + if (write) { + entry = pmd_mkyoung(pmd_mkdirty(entry)); + entry = maybe_pmd_mkwrite(entry, vma); + } + set_pmd_at(mm, addr, pmd, entry); + update_mmu_cache_pmd(vma, addr, pmd); } + spin_unlock(ptl); +} - count_vm_event(THP_FAULT_ALLOC); - return 0; +int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, unsigned long pfn, bool write) +{ + pgprot_t pgprot = vma->vm_page_prot; + /* + * If we had pmd_special, we could avoid all these restrictions, + * but we need to be consistent with PTEs and architectures that + * can't support a 'special' bit. + */ + BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); + BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == + (VM_PFNMAP|VM_MIXEDMAP)); + BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); + BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + if (track_pfn_insert(vma, &pgprot, pfn)) + return VM_FAULT_SIGBUS; + insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); + return VM_FAULT_NOPAGE; } int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -873,16 +944,14 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, */ if (is_huge_zero_pmd(pmd)) { struct page *zero_page; - bool set; /* * get_huge_zero_page() will never allocate a new page here, * since we already have a zero page to copy. It just takes a * reference. */ zero_page = get_huge_zero_page(); - set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, + set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, zero_page); - BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */ ret = 0; goto out_unlock; } @@ -1387,41 +1456,41 @@ out: int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) { + pmd_t orig_pmd; spinlock_t *ptl; - int ret = 0; - if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { - struct page *page; - pgtable_t pgtable; - pmd_t orig_pmd; - /* - * For architectures like ppc64 we look at deposited pgtable - * when calling pmdp_huge_get_and_clear. 
So do the - * pgtable_trans_huge_withdraw after finishing pmdp related - * operations. - */ - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, - tlb->fullmm); - tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); - if (is_huge_zero_pmd(orig_pmd)) { - atomic_long_dec(&tlb->mm->nr_ptes); - spin_unlock(ptl); + if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1) + return 0; + /* + * For architectures like ppc64 we look at deposited pgtable + * when calling pmdp_huge_get_and_clear. So do the + * pgtable_trans_huge_withdraw after finishing pmdp related + * operations. + */ + orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, + tlb->fullmm); + tlb_remove_pmd_tlb_entry(tlb, pmd, addr); + if (vma_is_dax(vma)) { + spin_unlock(ptl); + if (is_huge_zero_pmd(orig_pmd)) put_huge_zero_page(); - } else { - page = pmd_page(orig_pmd); - page_remove_rmap(page); - VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); - add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); - VM_BUG_ON_PAGE(!PageHead(page), page); - atomic_long_dec(&tlb->mm->nr_ptes); - spin_unlock(ptl); - tlb_remove_page(tlb, page); - } - pte_free(tlb->mm, pgtable); - ret = 1; + } else if (is_huge_zero_pmd(orig_pmd)) { + pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); + atomic_long_dec(&tlb->mm->nr_ptes); + spin_unlock(ptl); + put_huge_zero_page(); + } else { + struct page *page = pmd_page(orig_pmd); + page_remove_rmap(page); + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); + add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); + VM_BUG_ON_PAGE(!PageHead(page), page); + pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); + atomic_long_dec(&tlb->mm->nr_ptes); + spin_unlock(ptl); + tlb_remove_page(tlb, page); } - return ret; + return 1; } int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, @@ -1689,6 +1758,11 @@ static void __split_huge_page_refcount(struct page *page, /* clear PageTail before overwriting first_page */ smp_wmb(); + if (page_is_young(page)) + set_page_young(page_tail); + if (page_is_idle(page)) + set_page_idle(page_tail); + /* * __split_huge_page_splitting() already set the * splitting bit in all pmd that could map this @@ -2132,8 +2206,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pte_t pteval = *_pte; - if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { - if (++none_or_zero <= khugepaged_max_ptes_none) + if (pte_none(pteval) || (pte_present(pteval) && + is_zero_pfn(pte_pfn(pteval)))) { + if (!userfaultfd_armed(vma) && + ++none_or_zero <= khugepaged_max_ptes_none) continue; else goto out; @@ -2193,7 +2269,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, VM_BUG_ON_PAGE(PageLRU(page), page); /* If there is no mapped pte young don't collapse the page */ - if (pte_young(pteval) || PageReferenced(page) || + if (pte_young(pteval) || + page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) referenced = true; } @@ -2257,8 +2334,12 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, static void khugepaged_alloc_sleep(void) { - wait_event_freezable_timeout(khugepaged_wait, false, - msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); + DEFINE_WAIT(wait); + + add_wait_queue(&khugepaged_wait, &wait); + freezable_schedule_timeout_interruptible( + msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); + remove_wait_queue(&khugepaged_wait, &wait); } static 
int khugepaged_node_load[MAX_NUMNODES]; @@ -2345,7 +2426,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, */ up_read(&mm->mmap_sem); - *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER); + *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); *hpage = ERR_PTR(-ENOMEM); @@ -2586,7 +2667,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { - if (++none_or_zero <= khugepaged_max_ptes_none) + if (!userfaultfd_armed(vma) && + ++none_or_zero <= khugepaged_max_ptes_none) continue; else goto out_unmap; @@ -2619,7 +2701,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, */ if (page_count(page) != 1 + !!PageSwapCache(page)) goto out_unmap; - if (pte_young(pteval) || PageReferenced(page) || + if (pte_young(pteval) || + page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) referenced = true; } @@ -2882,7 +2965,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) { spinlock_t *ptl; - struct page *page; + struct page *page = NULL; struct mm_struct *mm = vma->vm_mm; unsigned long haddr = address & HPAGE_PMD_MASK; unsigned long mmun_start; /* For mmu_notifiers */ @@ -2895,25 +2978,27 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, again: mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_trans_huge(*pmd))) { - spin_unlock(ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - return; - } - if (is_huge_zero_pmd(*pmd)) { + if (unlikely(!pmd_trans_huge(*pmd))) + goto unlock; + if (vma_is_dax(vma)) { + pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); + if (is_huge_zero_pmd(_pmd)) + put_huge_zero_page(); + } else if (is_huge_zero_pmd(*pmd)) { __split_huge_zero_page_pmd(vma, haddr, pmd); - spin_unlock(ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - return; + } else { + page = pmd_page(*pmd); + VM_BUG_ON_PAGE(!page_count(page), page); + get_page(page); } - page = pmd_page(*pmd); - VM_BUG_ON_PAGE(!page_count(page), page); - get_page(page); + unlock: spin_unlock(ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - split_huge_page(page); + if (!page) + return; + split_huge_page(page); put_page(page); /* @@ -2962,7 +3047,7 @@ static void split_huge_page_address(struct mm_struct *mm, split_huge_page_pmd_mm(mm, address, pmd); } -void __vma_adjust_trans_huge(struct vm_area_struct *vma, +void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 62c1ec5a9..9cc773483 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -64,7 +64,7 @@ DEFINE_SPINLOCK(hugetlb_lock); * prevent spurious OOMs when the hugepage pool is fully utilized. */ static int num_fault_mutexes; -static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp; +struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); @@ -240,11 +240,14 @@ struct file_region { /* * Add the huge page range represented by [f, t) to the reserve - * map. Existing regions will be expanded to accommodate the - * specified range. 
We know only existing regions need to be - * expanded, because region_add is only called after region_chg - * with the same range. If a new file_region structure must - * be allocated, it is done in region_chg. + * map. In the normal case, existing regions will be expanded + * to accommodate the specified range. Sufficient regions should + * exist for expansion due to the previous call to region_chg + * with the same range. However, it is possible that region_del + * could have been called after region_chg and modifed the map + * in such a way that no region exists to be expanded. In this + * case, pull a region descriptor from the cache associated with + * the map and use that for the new range. * * Return the number of new huge pages added to the map. This * number is greater than or equal to zero. @@ -261,6 +264,28 @@ static long region_add(struct resv_map *resv, long f, long t) if (f <= rg->to) break; + /* + * If no region exists which can be expanded to include the + * specified range, the list must have been modified by an + * interleving call to region_del(). Pull a region descriptor + * from the cache and use it for this range. + */ + if (&rg->link == head || t < rg->from) { + VM_BUG_ON(resv->region_cache_count <= 0); + + resv->region_cache_count--; + nrg = list_first_entry(&resv->region_cache, struct file_region, + link); + list_del(&nrg->link); + + nrg->from = f; + nrg->to = t; + list_add(&nrg->link, rg->link.prev); + + add += t - f; + goto out_locked; + } + /* Round our left edge to the current segment if it encloses us. */ if (f > rg->from) f = rg->from; @@ -294,6 +319,8 @@ static long region_add(struct resv_map *resv, long f, long t) add += t - nrg->to; /* Added to end of region */ nrg->to = t; +out_locked: + resv->adds_in_progress--; spin_unlock(&resv->lock); VM_BUG_ON(add < 0); return add; @@ -312,11 +339,14 @@ static long region_add(struct resv_map *resv, long f, long t) * so that the subsequent region_add call will have all the * regions it needs and will not fail. * - * Returns the number of huge pages that need to be added - * to the existing reservation map for the range [f, t). - * This number is greater or equal to zero. -ENOMEM is - * returned if a new file_region structure is needed and can - * not be allocated. + * Upon entry, region_chg will also examine the cache of region descriptors + * associated with the map. If there are not enough descriptors cached, one + * will be allocated for the in progress add operation. + * + * Returns the number of huge pages that need to be added to the existing + * reservation map for the range [f, t). This number is greater or equal to + * zero. -ENOMEM is returned if a new file_region structure or cache entry + * is needed and can not be allocated. */ static long region_chg(struct resv_map *resv, long f, long t) { @@ -326,6 +356,31 @@ static long region_chg(struct resv_map *resv, long f, long t) retry: spin_lock(&resv->lock); +retry_locked: + resv->adds_in_progress++; + + /* + * Check for sufficient descriptors in the cache to accommodate + * the number of in progress add operations. + */ + if (resv->adds_in_progress > resv->region_cache_count) { + struct file_region *trg; + + VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1); + /* Must drop lock to allocate a new descriptor. 
*/ + resv->adds_in_progress--; + spin_unlock(&resv->lock); + + trg = kmalloc(sizeof(*trg), GFP_KERNEL); + if (!trg) + return -ENOMEM; + + spin_lock(&resv->lock); + list_add(&trg->link, &resv->region_cache); + resv->region_cache_count++; + goto retry_locked; + } + /* Locate the region we are before or in. */ list_for_each_entry(rg, head, link) if (f <= rg->to) @@ -336,6 +391,7 @@ retry: * size such that we can guarantee to record the reservation. */ if (&rg->link == head || t < rg->from) { if (!nrg) { + resv->adds_in_progress--; spin_unlock(&resv->lock); nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) @@ -385,43 +441,131 @@ out_nrg: } /* - * Truncate the reserve map at index 'end'. Modify/truncate any - * region which contains end. Delete any regions past end. - * Return the number of huge pages removed from the map. + * Abort the in progress add operation. The adds_in_progress field + * of the resv_map keeps track of the operations in progress between + * calls to region_chg and region_add. Operations are sometimes + * aborted after the call to region_chg. In such cases, region_abort + * is called to decrement the adds_in_progress counter. + * + * NOTE: The range arguments [f, t) are not needed or used in this + * routine. They are kept to make reading the calling code easier as + * arguments will match the associated region_chg call. + */ +static void region_abort(struct resv_map *resv, long f, long t) +{ + spin_lock(&resv->lock); + VM_BUG_ON(!resv->region_cache_count); + resv->adds_in_progress--; + spin_unlock(&resv->lock); +} + +/* + * Delete the specified range [f, t) from the reserve map. If the + * t parameter is LONG_MAX, this indicates that ALL regions after f + * should be deleted. Locate the regions which intersect [f, t) + * and either trim, delete or split the existing regions. + * + * Returns the number of huge pages deleted from the reserve map. + * In the normal case, the return value is zero or more. In the + * case where a region must be split, a new region descriptor must + * be allocated. If the allocation fails, -ENOMEM will be returned. + * NOTE: If the parameter t == LONG_MAX, then we will never split + * a region and possibly return -ENOMEM. Callers specifying + * t == LONG_MAX do not need to check for -ENOMEM error. */ -static long region_truncate(struct resv_map *resv, long end) +static long region_del(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *trg; - long chg = 0; + struct file_region *nrg = NULL; + long del = 0; +retry: spin_lock(&resv->lock); - /* Locate the region we are either in or before. */ - list_for_each_entry(rg, head, link) - if (end <= rg->to) + list_for_each_entry_safe(rg, trg, head, link) { + if (rg->to <= f) + continue; + if (rg->from >= t) break; - if (&rg->link == head) - goto out; - /* If we are in the middle of a region then adjust it. */ - if (end > rg->from) { - chg = rg->to - end; - rg->to = end; - rg = list_entry(rg->link.next, typeof(*rg), link); - } + if (f > rg->from && t < rg->to) { /* Must split region */ + /* + * Check for an entry in the cache before dropping + * lock and attempting allocation. + */ + if (!nrg && + resv->region_cache_count > resv->adds_in_progress) { + nrg = list_first_entry(&resv->region_cache, + struct file_region, + link); + list_del(&nrg->link); + resv->region_cache_count--; + } - /* Drop any remaining regions. 
*/ - list_for_each_entry_safe(rg, trg, rg->link.prev, link) { - if (&rg->link == head) + if (!nrg) { + spin_unlock(&resv->lock); + nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); + if (!nrg) + return -ENOMEM; + goto retry; + } + + del += t - f; + + /* New entry for end of split region */ + nrg->from = t; + nrg->to = rg->to; + INIT_LIST_HEAD(&nrg->link); + + /* Original entry is trimmed */ + rg->to = f; + + list_add(&nrg->link, &rg->link); + nrg = NULL; break; - chg += rg->to - rg->from; - list_del(&rg->link); - kfree(rg); + } + + if (f <= rg->from && t >= rg->to) { /* Remove entire region */ + del += rg->to - rg->from; + list_del(&rg->link); + kfree(rg); + continue; + } + + if (f <= rg->from) { /* Trim beginning of region */ + del += t - rg->from; + rg->from = t; + } else { /* Trim end of region */ + del += rg->to - f; + rg->to = f; + } } -out: spin_unlock(&resv->lock); - return chg; + kfree(nrg); + return del; +} + +/* + * A rare out of memory error was encountered which prevented removal of + * the reserve map region for a page. The huge page itself was free'ed + * and removed from the page cache. This routine will adjust the subpool + * usage count, and the global reserve count if needed. By incrementing + * these counts, the reserve map entry which could not be deleted will + * appear as a "reserved" entry instead of simply dangling with incorrect + * counts. + */ +void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve) +{ + struct hugepage_subpool *spool = subpool_inode(inode); + long rsv_adjust; + + rsv_adjust = hugepage_subpool_get_pages(spool, 1); + if (restore_reserve && rsv_adjust) { + struct hstate *h = hstate_inode(inode); + + hugetlb_acct_memory(h, 1); + } } /* @@ -544,22 +688,44 @@ static void set_vma_private_data(struct vm_area_struct *vma, struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); - if (!resv_map) + struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); + + if (!resv_map || !rg) { + kfree(resv_map); + kfree(rg); return NULL; + } kref_init(&resv_map->refs); spin_lock_init(&resv_map->lock); INIT_LIST_HEAD(&resv_map->regions); + resv_map->adds_in_progress = 0; + + INIT_LIST_HEAD(&resv_map->region_cache); + list_add(&rg->link, &resv_map->region_cache); + resv_map->region_cache_count = 1; + return resv_map; } void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); + struct list_head *head = &resv_map->region_cache; + struct file_region *rg, *trg; /* Clear out any active regions before we release the map. */ - region_truncate(resv_map, 0); + region_del(resv_map, 0, LONG_MAX); + + /* ... and any entries left in the cache */ + list_for_each_entry_safe(rg, trg, head, link) { + list_del(&rg->link); + kfree(rg); + } + + VM_BUG_ON(resv_map->adds_in_progress); + kfree(resv_map); } @@ -616,7 +782,7 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma) } /* Returns true if the VMA has associated reserve pages */ -static int vma_has_reserves(struct vm_area_struct *vma, long chg) +static bool vma_has_reserves(struct vm_area_struct *vma, long chg) { if (vma->vm_flags & VM_NORESERVE) { /* @@ -629,23 +795,34 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg) * properly, so add work-around here. 
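region_del() distinguishes three overlap shapes between [f, t) and an existing entry. As a quick illustration of the cases implemented above, each starting from a map that holds the single region [0, 10):

/*
 * region_del(resv, 3, 7)        -> split:  [0, 3) and [7, 10), returns 4
 *                                  (needs a spare descriptor; -ENOMEM if none)
 * region_del(resv, 0, 5)        -> trim:   [5, 10),            returns 5
 * region_del(resv, 0, LONG_MAX) -> remove: empty map,          returns 10
 */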
*/ if (vma->vm_flags & VM_MAYSHARE && chg == 0) - return 1; + return true; else - return 0; + return false; } /* Shared mappings always use reserves */ - if (vma->vm_flags & VM_MAYSHARE) - return 1; + if (vma->vm_flags & VM_MAYSHARE) { + /* + * We know VM_NORESERVE is not set. Therefore, there SHOULD + * be a region map for all pages. The only situation where + * there is no region map is if a hole was punched via + * fallocate. In this case, there really are no reverves to + * use. This situation is indicated if chg != 0. + */ + if (chg) + return false; + else + return true; + } /* * Only the process that called mmap() has reserves for * private mappings. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) - return 1; + return true; - return 0; + return false; } static void enqueue_huge_page(struct hstate *h, struct page *page) @@ -1154,7 +1331,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) { struct page *page; - page = alloc_pages_exact_node(nid, + page = __alloc_pages_node(nid, htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); @@ -1306,7 +1483,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); else - page = alloc_pages_exact_node(nid, + page = __alloc_pages_node(nid, htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); @@ -1473,16 +1650,19 @@ static void return_unused_surplus_pages(struct hstate *h, } } + /* - * vma_needs_reservation and vma_commit_reservation are used by the huge - * page allocation routines to manage reservations. + * vma_needs_reservation, vma_commit_reservation and vma_end_reservation + * are used by the huge page allocation routines to manage reservations. * * vma_needs_reservation is called to determine if the huge page at addr * within the vma has an associated reservation. If a reservation is * needed, the value 1 is returned. The caller is then responsible for * managing the global reservation and subpool usage counts. After * the huge page has been allocated, vma_commit_reservation is called - * to add the page to the reservation map. + * to add the page to the reservation map. If the page allocation fails, + * the reservation must be ended instead of committed. vma_end_reservation + * is called in such cases. * * In the normal case, vma_commit_reservation returns the same value * as the preceding vma_needs_reservation call. The only time this @@ -1490,9 +1670,14 @@ static void return_unused_surplus_pages(struct hstate *h, * is the responsibility of the caller to notice the difference and * take appropriate action. 
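Taken together, the reservation helpers now form a three-step protocol around each huge page allocation. A schematic sketch of the calling convention the comment above describes; allocate_the_page() is a placeholder, and the error handling is simplified relative to the real alloc_huge_page():

	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-ENOMEM);

	page = allocate_the_page();		/* placeholder for dequeue/alloc */
	if (!page) {
		/* allocation failed: only drop adds_in_progress, free nothing */
		vma_end_reservation(h, vma, addr);
		return ERR_PTR(-ENOSPC);
	}

	/* success: record the page in the reserve map */
	vma_commit_reservation(h, vma, addr);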
*/ +enum vma_resv_mode { + VMA_NEEDS_RESV, + VMA_COMMIT_RESV, + VMA_END_RESV, +}; static long __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, - bool commit) + enum vma_resv_mode mode) { struct resv_map *resv; pgoff_t idx; @@ -1503,10 +1688,20 @@ static long __vma_reservation_common(struct hstate *h, return 1; idx = vma_hugecache_offset(h, vma, addr); - if (commit) - ret = region_add(resv, idx, idx + 1); - else + switch (mode) { + case VMA_NEEDS_RESV: ret = region_chg(resv, idx, idx + 1); + break; + case VMA_COMMIT_RESV: + ret = region_add(resv, idx, idx + 1); + break; + case VMA_END_RESV: + region_abort(resv, idx, idx + 1); + ret = 0; + break; + default: + BUG(); + } if (vma->vm_flags & VM_MAYSHARE) return ret; @@ -1517,47 +1712,79 @@ static long __vma_reservation_common(struct hstate *h, static long vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { - return __vma_reservation_common(h, vma, addr, false); + return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); } static long vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { - return __vma_reservation_common(h, vma, addr, true); + return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); } -static struct page *alloc_huge_page(struct vm_area_struct *vma, +static void vma_end_reservation(struct hstate *h, + struct vm_area_struct *vma, unsigned long addr) +{ + (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); +} + +struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct page *page; - long chg, commit; + long map_chg, map_commit; + long gbl_chg; int ret, idx; struct hugetlb_cgroup *h_cg; idx = hstate_index(h); /* - * Processes that did not create the mapping will have no - * reserves and will not have accounted against subpool - * limit. Check that the subpool limit can be made before - * satisfying the allocation MAP_NORESERVE mappings may also - * need pages and subpool limit allocated allocated if no reserve - * mapping overlaps. + * Examine the region/reserve map to determine if the process + * has a reservation for the page to be allocated. A return + * code of zero indicates a reservation exists (no change). */ - chg = vma_needs_reservation(h, vma, addr); - if (chg < 0) + map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); + if (map_chg < 0) return ERR_PTR(-ENOMEM); - if (chg || avoid_reserve) - if (hugepage_subpool_get_pages(spool, 1) < 0) + + /* + * Processes that did not create the mapping will have no + * reserves as indicated by the region/reserve map. Check + * that the allocation will not exceed the subpool limit. + * Allocations for MAP_NORESERVE mappings also need to be + * checked against any subpool limit. + */ + if (map_chg || avoid_reserve) { + gbl_chg = hugepage_subpool_get_pages(spool, 1); + if (gbl_chg < 0) { + vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); + } + + /* + * Even though there was no reservation in the region/reserve + * map, there could be reservations associated with the + * subpool that can be used. This would be indicated if the + * return value of hugepage_subpool_get_pages() is zero. + * However, if avoid_reserve is specified we still avoid even + * the subpool reservations. 
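In the reworked alloc_huge_page(), map_chg answers "does the reserve map already cover this page?" while gbl_chg answers "must a page come from the global free pool?". A condensed view of the combinations, using the names from the hunk above (informal summary, not kernel code):

/*
 *  map_chg  avoid_reserve  gbl_chg   outcome
 *  -------  -------------  -------   ----------------------------------------
 *     0          no           0      map reservation exists; use reserved page
 *     1          no           0      no map entry, but the subpool had reserves
 *     1          no          >0      page must come from the global free pool
 *     -          yes          1      reserves ignored; treated as a new page
 */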
+ */ + if (avoid_reserve) + gbl_chg = 1; + } ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); if (ret) goto out_subpool_put; spin_lock(&hugetlb_lock); - page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); + /* + * glb_chg is passed to indicate whether or not a page must be taken + * from the global free pool (global change). gbl_chg == 0 indicates + * a reservation exists for the allocation. + */ + page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); if (!page) { spin_unlock(&hugetlb_lock); page = alloc_buddy_huge_page(h, NUMA_NO_NODE); @@ -1573,8 +1800,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, set_page_private(page, (unsigned long)spool); - commit = vma_commit_reservation(h, vma, addr); - if (unlikely(chg > commit)) { + map_commit = vma_commit_reservation(h, vma, addr); + if (unlikely(map_chg > map_commit)) { /* * The page was added to the reservation map between * vma_needs_reservation and vma_commit_reservation. @@ -1594,8 +1821,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (chg || avoid_reserve) + if (map_chg || avoid_reserve) hugepage_subpool_put_pages(spool, 1); + vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } @@ -2311,7 +2539,7 @@ static void __exit hugetlb_exit(void) } kobject_put(hugepages_kobj); - kfree(htlb_fault_mutex_table); + kfree(hugetlb_fault_mutex_table); } module_exit(hugetlb_exit); @@ -2344,12 +2572,12 @@ static int __init hugetlb_init(void) #else num_fault_mutexes = 1; #endif - htlb_fault_mutex_table = + hugetlb_fault_mutex_table = kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL); - BUG_ON(!htlb_fault_mutex_table); + BUG_ON(!hugetlb_fault_mutex_table); for (i = 0; i < num_fault_mutexes; i++) - mutex_init(&htlb_fault_mutex_table[i]); + mutex_init(&hugetlb_fault_mutex_table[i]); return 0; } module_init(hugetlb_init); @@ -3155,6 +3383,23 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, return page != NULL; } +int huge_add_to_page_cache(struct page *page, struct address_space *mapping, + pgoff_t idx) +{ + struct inode *inode = mapping->host; + struct hstate *h = hstate_inode(inode); + int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); + + if (err) + return err; + ClearPagePrivate(page); + + spin_lock(&inode->i_lock); + inode->i_blocks += blocks_per_huge_page(h); + spin_unlock(&inode->i_lock); + return 0; +} + static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address, pte_t *ptep, unsigned int flags) @@ -3202,21 +3447,13 @@ retry: set_page_huge_active(page); if (vma->vm_flags & VM_MAYSHARE) { - int err; - struct inode *inode = mapping->host; - - err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); + int err = huge_add_to_page_cache(page, mapping, idx); if (err) { put_page(page); if (err == -EEXIST) goto retry; goto out; } - ClearPagePrivate(page); - - spin_lock(&inode->i_lock); - inode->i_blocks += blocks_per_huge_page(h); - spin_unlock(&inode->i_lock); } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { @@ -3244,11 +3481,14 @@ retry: * any allocations necessary to record that reservation occur outside * the spinlock. 
*/ - if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { if (vma_needs_reservation(h, vma, address) < 0) { ret = VM_FAULT_OOM; goto backout_unlocked; } + /* Just decrements count, does not deallocate */ + vma_end_reservation(h, vma, address); + } ptl = huge_pte_lockptr(h, mm, ptep); spin_lock(ptl); @@ -3288,7 +3528,7 @@ backout_unlocked: } #ifdef CONFIG_SMP -static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) @@ -3313,7 +3553,7 @@ static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, * For uniprocesor systems we always use a single mutex, so just * return 0 and avoid the hashing overhead. */ -static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, +u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) @@ -3361,8 +3601,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ - hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); - mutex_lock(&htlb_fault_mutex_table[hash]); + hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); + mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); if (huge_pte_none(entry)) { @@ -3395,6 +3635,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, ret = VM_FAULT_OOM; goto out_mutex; } + /* Just decrements count, does not deallocate */ + vma_end_reservation(h, vma, address); if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, @@ -3445,7 +3687,7 @@ out_ptl: put_page(pagecache_page); } out_mutex: - mutex_unlock(&htlb_fault_mutex_table[hash]); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * Generally it's safe to hold refcount during waiting page lock. But * here we just wait to defer the next page fault to avoid busy loop and @@ -3734,12 +3976,15 @@ int hugetlb_reserve_pages(struct inode *inode, } return 0; out_err: + if (!vma || vma->vm_flags & VM_MAYSHARE) + region_abort(resv_map, from, to); if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_put(&resv_map->refs, resv_map_release); return ret; } -void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) +long hugetlb_unreserve_pages(struct inode *inode, long start, long end, + long freed) { struct hstate *h = hstate_inode(inode); struct resv_map *resv_map = inode_resv_map(inode); @@ -3747,8 +3992,17 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) struct hugepage_subpool *spool = subpool_inode(inode); long gbl_reserve; - if (resv_map) - chg = region_truncate(resv_map, offset); + if (resv_map) { + chg = region_del(resv_map, start, end); + /* + * region_del() can fail in the rare case where a region + * must be split and another region descriptor can not be + * allocated. If end == LONG_MAX, it will not fail. 
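Dropping the static from fault_mutex_hash() and giving the table a hugetlb_ prefix suggests that code outside mm/hugetlb.c is expected to serialize with page faults on the same index. A hypothetical external caller would follow the pattern hugetlb_fault() already uses; the helper and table names come from this patch, everything around them is illustrative:

	u32 hash;

	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/* ... operate on the page cache / reserve map entry for idx ... */

	mutex_unlock(&hugetlb_fault_mutex_table[hash]);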
+ */ + if (chg < 0) + return chg; + } + spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); @@ -3759,6 +4013,8 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) */ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); hugetlb_acct_memory(h, -gbl_reserve); + + return 0; } #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE @@ -3787,7 +4043,7 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma, return saddr; } -static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) +static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) { unsigned long base = addr & PUD_MASK; unsigned long end = base + PUD_SIZE; @@ -3797,8 +4053,8 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) */ if (vma->vm_flags & VM_MAYSHARE && vma->vm_start <= base && end <= vma->vm_end) - return 1; - return 0; + return true; + return false; } /* diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index bf73ac17d..9d26fd9fe 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -45,12 +45,9 @@ static int hwpoison_inject(void *data, u64 val) /* * do a racy check with elevated page count, to make sure PG_hwpoison * will only be set for the targeted owner (or on a free page). - * We temporarily take page lock for try_get_mem_cgroup_from_page(). * memory_failure() will redo the check reliably inside page lock. */ - lock_page(hpage); err = hwpoison_filter(hpage); - unlock_page(hpage); if (err) goto put_out; @@ -58,7 +55,7 @@ inject: pr_info("Injecting memory failure at pfn %#lx\n", pfn); return memory_failure(pfn, 18, MF_COUNT_INCREASED); put_out: - put_page(p); + put_hwpoison_page(p); return 0; } @@ -126,7 +123,7 @@ static int pfn_inject_init(void) if (!dentry) goto fail; -#ifdef CONFIG_MEMCG_SWAP +#ifdef CONFIG_MEMCG dentry = debugfs_create_u64("corrupt-filter-memcg", 0600, hwpoison_dir, &hwpoison_filter_memcg); if (!dentry) diff --git a/mm/internal.h b/mm/internal.h index 36b23f1e2..bc0fa9a69 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -182,6 +182,7 @@ struct compact_control { unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ + unsigned long last_migrated_pfn;/* Not yet flushed page being freed */ enum migrate_mode mode; /* Async or sync migration mode */ bool ignore_skip_hint; /* Scan blocks even if marked skip */ int order; /* order a direct compactor needs */ @@ -426,4 +427,19 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ #define ALLOC_FAIR 0x100 /* fair zone allocation */ +enum ttu_flags; +struct tlbflush_unmap_batch; + +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH +void try_to_unmap_flush(void); +void try_to_unmap_flush_dirty(void); +#else +static inline void try_to_unmap_flush(void) +{ +} +static inline void try_to_unmap_flush_dirty(void) +{ +} + +#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ #endif /* __MM_INTERNAL_H */ diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index bd837b8c2..647101489 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -5,4 +5,4 @@ CFLAGS_REMOVE_kasan.o = -pg # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -obj-y := kasan.o report.o +obj-y := kasan.o report.o kasan_init.o diff --git 
a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 7b28e9cdf..8da211411 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr) if (unlikely(*shadow_addr)) { u16 shadow_first_bytes = *(u16 *)shadow_addr; - s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK; if (unlikely(shadow_first_bytes)) return true; - if (likely(!last_byte)) + if (likely(IS_ALIGNED(addr, 8))) return false; return memory_is_poisoned_1(addr + 15); diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c new file mode 100644 index 000000000..3f9a41cf0 --- /dev/null +++ b/mm/kasan/kasan_init.c @@ -0,0 +1,152 @@ +/* + * This file contains some kasan initialization code. + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/bootmem.h> +#include <linux/init.h> +#include <linux/kasan.h> +#include <linux/kernel.h> +#include <linux/memblock.h> +#include <linux/pfn.h> + +#include <asm/page.h> +#include <asm/pgalloc.h> + +/* + * This page serves two purposes: + * - It used as early shadow memory. The entire shadow region populated + * with this page, before we will be able to setup normal shadow memory. + * - Latter it reused it as zero shadow to cover large ranges of memory + * that allowed to access, but not handled by kasan (vmalloc/vmemmap ...). + */ +unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; + +#if CONFIG_PGTABLE_LEVELS > 3 +pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; +#endif +#if CONFIG_PGTABLE_LEVELS > 2 +pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; +#endif +pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; + +static __init void *early_alloc(size_t size, int node) +{ + return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), + BOOTMEM_ALLOC_ACCESSIBLE, node); +} + +static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr, + unsigned long end) +{ + pte_t *pte = pte_offset_kernel(pmd, addr); + pte_t zero_pte; + + zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL); + zero_pte = pte_wrprotect(zero_pte); + + while (addr + PAGE_SIZE <= end) { + set_pte_at(&init_mm, addr, pte, zero_pte); + addr += PAGE_SIZE; + pte = pte_offset_kernel(pmd, addr); + } +} + +static void __init zero_pmd_populate(pud_t *pud, unsigned long addr, + unsigned long end) +{ + pmd_t *pmd = pmd_offset(pud, addr); + unsigned long next; + + do { + next = pmd_addr_end(addr, end); + + if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + continue; + } + + if (pmd_none(*pmd)) { + pmd_populate_kernel(&init_mm, pmd, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + zero_pte_populate(pmd, addr, next); + } while (pmd++, addr = next, addr != end); +} + +static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, + unsigned long end) +{ + pud_t *pud = pud_offset(pgd, addr); + unsigned long next; + + do { + next = pud_addr_end(addr, end); + if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { + pmd_t *pmd; + + pud_populate(&init_mm, pud, kasan_zero_pmd); + pmd = pmd_offset(pud, addr); + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + continue; + } + + if (pud_none(*pud)) { + pud_populate(&init_mm, pud, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + 
zero_pmd_populate(pud, addr, next); + } while (pud++, addr = next, addr != end); +} + +/** + * kasan_populate_zero_shadow - populate shadow memory region with + * kasan_zero_page + * @shadow_start - start of the memory range to populate + * @shadow_end - end of the memory range to populate + */ +void __init kasan_populate_zero_shadow(const void *shadow_start, + const void *shadow_end) +{ + unsigned long addr = (unsigned long)shadow_start; + unsigned long end = (unsigned long)shadow_end; + pgd_t *pgd = pgd_offset_k(addr); + unsigned long next; + + do { + next = pgd_addr_end(addr, end); + + if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) { + pud_t *pud; + pmd_t *pmd; + + /* + * kasan_zero_pud should be populated with pmds + * at this moment. + * [pud,pmd]_populate*() below needed only for + * 3,2 - level page tables where we don't have + * puds,pmds, so pgd_populate(), pud_populate() + * is noops. + */ + pgd_populate(&init_mm, pgd, kasan_zero_pud); + pud = pud_offset(pgd, addr); + pud_populate(&init_mm, pud, kasan_zero_pmd); + pmd = pmd_offset(pud, addr); + pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); + continue; + } + + if (pgd_none(*pgd)) { + pgd_populate(&init_mm, pgd, + early_alloc(PAGE_SIZE, NUMA_NO_NODE)); + } + zero_pud_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} diff --git a/mm/kmemleak.c b/mm/kmemleak.c index cf79f1101..77191eccd 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -302,23 +302,14 @@ static void hex_dump_object(struct seq_file *seq, struct kmemleak_object *object) { const u8 *ptr = (const u8 *)object->pointer; - int i, len, remaining; - unsigned char linebuf[HEX_ROW_SIZE * 5]; + size_t len; /* limit the number of lines to HEX_MAX_LINES */ - remaining = len = - min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE)); - - seq_printf(seq, " hex dump (first %d bytes):\n", len); - for (i = 0; i < len; i += HEX_ROW_SIZE) { - int linelen = min(remaining, HEX_ROW_SIZE); - - remaining -= HEX_ROW_SIZE; - hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE, - HEX_GROUP_SIZE, linebuf, sizeof(linebuf), - HEX_ASCII); - seq_printf(seq, " %s\n", linebuf); - } + len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); + + seq_printf(seq, " hex dump (first %zu bytes):\n", len); + seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, + HEX_GROUP_SIZE, ptr, len, HEX_ASCII); } /* @@ -838,6 +829,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size, } if (crt_early_log >= ARRAY_SIZE(early_log)) { + crt_early_log++; kmemleak_disable(); return; } @@ -1882,7 +1874,7 @@ void __init kmemleak_init(void) object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); - if (crt_early_log >= ARRAY_SIZE(early_log)) + if (crt_early_log > ARRAY_SIZE(early_log)) pr_warning("Early log buffer exceeded (%d), please increase " "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log); diff --git a/mm/list_lru.c b/mm/list_lru.c index 909eca2c8..e1da19fac 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -99,8 +99,8 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) struct list_lru_one *l; spin_lock(&nlru->lock); - l = list_lru_from_kmem(nlru, item); if (list_empty(item)) { + l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; spin_unlock(&nlru->lock); @@ -118,8 +118,8 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) struct list_lru_one *l; spin_lock(&nlru->lock); - l = list_lru_from_kmem(nlru, item); if 
(!list_empty(item)) { + l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; spin_unlock(&nlru->lock); diff --git a/mm/maccess.c b/mm/maccess.c index d53adf9ba..34fe24759 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -60,3 +60,44 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) return ret ? -EFAULT : 0; } EXPORT_SYMBOL_GPL(probe_kernel_write); + +/** + * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Unsafe address. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from unsafe address to kernel buffer. + * + * On success, returns the length of the string INCLUDING the trailing NUL. + * + * If access fails, returns -EFAULT (some data may have been copied + * and the trailing NUL added). + * + * If @count is smaller than the length of the string, copies @count-1 bytes, + * sets the last byte of @dst buffer to NUL and returns @count. + */ +long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) +{ + mm_segment_t old_fs = get_fs(); + const void *src = unsafe_addr; + long ret; + + if (unlikely(count <= 0)) + return 0; + + set_fs(KERNEL_DS); + pagefault_disable(); + + do { + ret = __copy_from_user_inatomic(dst++, + (const void __user __force *)src++, 1); + } while (dst[-1] && ret == 0 && src - unsafe_addr < count); + + dst[-1] = '\0'; + pagefault_enable(); + set_fs(old_fs); + + return ret < 0 ? ret : src - unsafe_addr; +} diff --git a/mm/madvise.c b/mm/madvise.c index 64bb8a221..c889fcbb5 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -103,7 +103,8 @@ static long madvise_behavior(struct vm_area_struct *vma, pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, - vma->vm_file, pgoff, vma_policy(vma)); + vma->vm_file, pgoff, vma_policy(vma), + vma->vm_userfaultfd_ctx); if (*prev) { vma = *prev; goto success; @@ -300,7 +301,7 @@ static long madvise_remove(struct vm_area_struct *vma, *prev = NULL; /* tell sys_madvise we drop mmap_sem */ - if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB)) + if (vma->vm_flags & VM_LOCKED) return -EINVAL; f = vma->vm_file; @@ -385,7 +386,7 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, } } -static int +static bool madvise_behavior_valid(int behavior) { switch (behavior) { @@ -407,10 +408,10 @@ madvise_behavior_valid(int behavior) #endif case MADV_DONTDUMP: case MADV_DODUMP: - return 1; + return true; default: - return 0; + return false; } } diff --git a/mm/memblock.c b/mm/memblock.c index 87108e77e..1c7b647e5 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -91,7 +91,7 @@ static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, p return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } -static long __init_memblock memblock_overlaps_region(struct memblock_type *type, +bool __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long i; @@ -103,7 +103,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, break; } - return (i < type->cnt) ? i : -1; + return i < type->cnt; } /* @@ -566,6 +566,10 @@ repeat: * area, insert that portion. 
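One usage note on the strncpy_from_unsafe() helper introduced in the maccess.c hunk above: it is meant for kernel addresses that may fault (the copy runs with pagefaults disabled), so the caller only has to size the buffer and check the result. A minimal sketch, where unsafe_ptr stands for any possibly-invalid kernel pointer:

	char buf[64];
	long len;

	len = strncpy_from_unsafe(buf, unsafe_ptr, sizeof(buf));
	if (len < 0)
		return len;	/* -EFAULT: nothing usable was copied */
	/* buf is NUL-terminated here; on success len includes the trailing NUL */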
*/ if (rbase > base) { +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + WARN_ON(nid != memblock_get_region_node(rgn)); +#endif + WARN_ON(flags != rgn->flags); nr_new++; if (insert) memblock_insert_region(type, i++, base, @@ -611,14 +615,14 @@ static int __init_memblock memblock_add_region(phys_addr_t base, int nid, unsigned long flags) { - struct memblock_type *_rgn = &memblock.memory; + struct memblock_type *type = &memblock.memory; memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n", (unsigned long long)base, (unsigned long long)base + size - 1, flags, (void *)_RET_IP_); - return memblock_add_range(_rgn, base, size, nid, flags); + return memblock_add_range(type, base, size, nid, flags); } int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) @@ -758,7 +762,7 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) * * This function isolates region [@base, @base + @size), and sets/clears flag * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ static int __init_memblock memblock_setclr_flag(phys_addr_t base, phys_addr_t size, int set, int flag) @@ -785,7 +789,7 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base, * @base: the base phys addr of the region * @size: the size of the region * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) { @@ -797,7 +801,7 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) * @base: the base phys addr of the region * @size: the size of the region * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) { @@ -809,7 +813,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) * @base: the base phys addr of the region * @size: the size of the region * - * Return 0 on succees, -errno on failure. + * Return 0 on success, -errno on failure. */ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) { @@ -831,10 +835,10 @@ void __init_memblock __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, phys_addr_t *out_end) { - struct memblock_type *rsv = &memblock.reserved; + struct memblock_type *type = &memblock.reserved; - if (*idx >= 0 && *idx < rsv->cnt) { - struct memblock_region *r = &rsv->regions[*idx]; + if (*idx >= 0 && *idx < type->cnt) { + struct memblock_region *r = &type->regions[*idx]; phys_addr_t base = r->base; phys_addr_t size = r->size; @@ -972,7 +976,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags, * in type_b. * * @idx: pointer to u64 loop variable - * @nid: nid: node selector, %NUMA_NO_NODE for all nodes + * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @type_a: pointer to memblock_type from where the range is taken * @type_b: pointer to memblock_type which excludes memory from being taken @@ -1562,12 +1566,12 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size * Check if the region [@base, @base+@size) intersects a reserved memory block. * * RETURNS: - * 0 if false, non-zero if true + * True if they intersect, false if not. 
*/ -int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) +bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) { memblock_cap_size(base, &size); - return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; + return memblock_overlaps_region(&memblock.reserved, base, size); } void __init_memblock memblock_trim_memory(phys_addr_t align) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 03a6f7506..c57c4423c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -111,56 +111,10 @@ static const char * const mem_cgroup_lru_names[] = { "unevictable", }; -/* - * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremated by the number of pages. This counter is used for - * for trigger some periodic events. This is straightforward and better - * than using jiffies etc. to handle periodic memcg event. - */ -enum mem_cgroup_events_target { - MEM_CGROUP_TARGET_THRESH, - MEM_CGROUP_TARGET_SOFTLIMIT, - MEM_CGROUP_TARGET_NUMAINFO, - MEM_CGROUP_NTARGETS, -}; #define THRESHOLDS_EVENTS_TARGET 128 #define SOFTLIMIT_EVENTS_TARGET 1024 #define NUMAINFO_EVENTS_TARGET 1024 -struct mem_cgroup_stat_cpu { - long count[MEM_CGROUP_STAT_NSTATS]; - unsigned long events[MEMCG_NR_EVENTS]; - unsigned long nr_page_events; - unsigned long targets[MEM_CGROUP_NTARGETS]; -}; - -struct reclaim_iter { - struct mem_cgroup *position; - /* scan generation, increased every round-trip */ - unsigned int generation; -}; - -/* - * per-zone information in memory controller. - */ -struct mem_cgroup_per_zone { - struct lruvec lruvec; - unsigned long lru_size[NR_LRU_LISTS]; - - struct reclaim_iter iter[DEF_PRIORITY + 1]; - - struct rb_node tree_node; /* RB tree node */ - unsigned long usage_in_excess;/* Set to the value by which */ - /* the soft limit is exceeded*/ - bool on_tree; - struct mem_cgroup *memcg; /* Back pointer, we cannot */ - /* use container_of */ -}; - -struct mem_cgroup_per_node { - struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; -}; - /* * Cgroups above their limits are maintained in a RB-Tree, independent of * their hierarchy representation @@ -181,32 +135,6 @@ struct mem_cgroup_tree { static struct mem_cgroup_tree soft_limit_tree __read_mostly; -struct mem_cgroup_threshold { - struct eventfd_ctx *eventfd; - unsigned long threshold; -}; - -/* For threshold */ -struct mem_cgroup_threshold_ary { - /* An array index points to threshold just below or equal to usage. */ - int current_threshold; - /* Size of entries[] */ - unsigned int size; - /* Array of thresholds */ - struct mem_cgroup_threshold entries[0]; -}; - -struct mem_cgroup_thresholds { - /* Primary thresholds array */ - struct mem_cgroup_threshold_ary *primary; - /* - * Spare threshold array. - * This is needed to make mem_cgroup_unregister_event() "never fail". - * It must be able to store at least primary->size - 1 entries. - */ - struct mem_cgroup_threshold_ary *spare; -}; - /* for OOM */ struct mem_cgroup_eventfd_list { struct list_head list; @@ -256,113 +184,6 @@ struct mem_cgroup_event { static void mem_cgroup_threshold(struct mem_cgroup *memcg); static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); -/* - * The memory controller data structure. The memory controller controls both - * page cache and RSS per cgroup. We would eventually like to provide - * statistics based on the statistics developed by Rik Van Riel for clock-pro, - * to help the administrator determine what knobs to tune. 
- */ -struct mem_cgroup { - struct cgroup_subsys_state css; - - /* Accounted resources */ - struct page_counter memory; - struct page_counter memsw; - struct page_counter kmem; - - /* Normal memory consumption range */ - unsigned long low; - unsigned long high; - - unsigned long soft_limit; - - /* vmpressure notifications */ - struct vmpressure vmpressure; - - /* css_online() has been completed */ - int initialized; - - /* - * Should the accounting and control be hierarchical, per subtree? - */ - bool use_hierarchy; - - /* protected by memcg_oom_lock */ - bool oom_lock; - int under_oom; - - int swappiness; - /* OOM-Killer disable */ - int oom_kill_disable; - - /* protect arrays of thresholds */ - struct mutex thresholds_lock; - - /* thresholds for memory usage. RCU-protected */ - struct mem_cgroup_thresholds thresholds; - - /* thresholds for mem+swap usage. RCU-protected */ - struct mem_cgroup_thresholds memsw_thresholds; - - /* For oom notifier event fd */ - struct list_head oom_notify; - - /* - * Should we move charges of a task when a task is moved into this - * mem_cgroup ? And what type of charges should we move ? - */ - unsigned long move_charge_at_immigrate; - /* - * set > 0 if pages under this cgroup are moving to other cgroup. - */ - atomic_t moving_account; - /* taken only while moving_account > 0 */ - spinlock_t move_lock; - struct task_struct *move_lock_task; - unsigned long move_lock_flags; - /* - * percpu counter. - */ - struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; - -#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) - struct cg_proto tcp_mem; -#endif -#if defined(CONFIG_MEMCG_KMEM) - /* Index in the kmem_cache->memcg_params.memcg_caches array */ - int kmemcg_id; - bool kmem_acct_activated; - bool kmem_acct_active; -#endif - - int last_scanned_node; -#if MAX_NUMNODES > 1 - nodemask_t scan_nodes; - atomic_t numainfo_events; - atomic_t numainfo_updating; -#endif - -#ifdef CONFIG_CGROUP_WRITEBACK - struct list_head cgwb_list; - struct wb_domain cgwb_domain; -#endif - - /* List of events which userspace want to receive */ - struct list_head event_list; - spinlock_t event_list_lock; - - struct mem_cgroup_per_node *nodeinfo[0]; - /* WARNING: nodeinfo must be the last member here */ -}; - -#ifdef CONFIG_MEMCG_KMEM -bool memcg_kmem_is_active(struct mem_cgroup *memcg) -{ - return memcg->kmem_acct_active; -} -#endif - /* Stuffs for move charges at task migration. */ /* * Types of charges to be moved. @@ -423,11 +244,6 @@ enum res_type { */ static DEFINE_MUTEX(memcg_create_mutex); -struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) -{ - return s ? container_of(s, struct mem_cgroup, css) : NULL; -} - /* Some nice accessors for the vmpressure. 
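The struct mem_cgroup definition and several trivial accessors are deleted from memcontrol.c here without replacements in this file; presumably they move into a shared header (likely include/linux/memcontrol.h, though that hunk is not part of this mm/ diffstat) so other mm code can use them inline. A removed one-liner would then look roughly like this in the header (sketch based on the body removed above):

	static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
	{
		return s ? container_of(s, struct mem_cgroup, css) : NULL;
	}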
*/ struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) { @@ -499,8 +315,7 @@ void sock_update_memcg(struct sock *sk) rcu_read_lock(); memcg = mem_cgroup_from_task(current); cg_proto = sk->sk_prot->proto_cgroup(memcg); - if (!mem_cgroup_is_root(memcg) && - memcg_proto_active(cg_proto) && + if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) && css_tryget_online(&memcg->css)) { sk->sk_cgrp = cg_proto; } @@ -593,11 +408,6 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) return &memcg->nodeinfo[nid]->zoneinfo[zid]; } -struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) -{ - return &memcg->css; -} - /** * mem_cgroup_css_from_page - css of the memcg associated with a page * @page: page of interest @@ -631,6 +441,34 @@ struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) return &memcg->css; } +/** + * page_cgroup_ino - return inode number of the memcg a page is charged to + * @page: the page + * + * Look up the closest online ancestor of the memory cgroup @page is charged to + * and return its inode number or 0 if @page is not charged to any cgroup. It + * is safe to call this function without holding a reference to @page. + * + * Note, this function is inherently racy, because there is nothing to prevent + * the cgroup inode from getting torn down and potentially reallocated a moment + * after page_cgroup_ino() returns, so it only should be used by callers that + * do not care (such as procfs interfaces). + */ +ino_t page_cgroup_ino(struct page *page) +{ + struct mem_cgroup *memcg; + unsigned long ino = 0; + + rcu_read_lock(); + memcg = READ_ONCE(page->mem_cgroup); + while (memcg && !(memcg->css.flags & CSS_ONLINE)) + memcg = parent_mem_cgroup(memcg); + if (memcg) + ino = cgroup_ino(memcg->css.cgroup); + rcu_read_unlock(); + return ino; +} + static struct mem_cgroup_per_zone * mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page) { @@ -885,14 +723,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, __this_cpu_add(memcg->stat->nr_page_events, nr_pages); } -unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) -{ - struct mem_cgroup_per_zone *mz; - - mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); - return mz->lru_size[lru]; -} - static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, int nid, unsigned int lru_mask) @@ -995,6 +825,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); } +EXPORT_SYMBOL(mem_cgroup_from_task); static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) { @@ -1040,7 +871,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) { - struct reclaim_iter *uninitialized_var(iter); + struct mem_cgroup_reclaim_iter *uninitialized_var(iter); struct cgroup_subsys_state *css = NULL; struct mem_cgroup *memcg = NULL; struct mem_cgroup *pos = NULL; @@ -1182,30 +1013,6 @@ void mem_cgroup_iter_break(struct mem_cgroup *root, iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) -void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) -{ - struct mem_cgroup *memcg; - - rcu_read_lock(); - memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (unlikely(!memcg)) - goto out; - - switch (idx) { - case PGFAULT: - this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); - break; - case PGMAJFAULT: - 
this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); - break; - default: - BUG(); - } -out: - rcu_read_unlock(); -} -EXPORT_SYMBOL(__mem_cgroup_count_vm_event); - /** * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg * @zone: zone of the wanted lruvec @@ -1304,15 +1111,6 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, VM_BUG_ON((long)(*lru_size) < 0); } -bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) -{ - if (root == memcg) - return true; - if (!root->use_hierarchy) - return false; - return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); -} - bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; @@ -1339,39 +1137,6 @@ bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) return ret; } -int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) -{ - unsigned long inactive_ratio; - unsigned long inactive; - unsigned long active; - unsigned long gb; - - inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); - active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); - - gb = (inactive + active) >> (30 - PAGE_SHIFT); - if (gb) - inactive_ratio = int_sqrt(10 * gb); - else - inactive_ratio = 1; - - return inactive * inactive_ratio < active; -} - -bool mem_cgroup_lruvec_online(struct lruvec *lruvec) -{ - struct mem_cgroup_per_zone *mz; - struct mem_cgroup *memcg; - - if (mem_cgroup_disabled()) - return true; - - mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); - memcg = mz->memcg; - - return !!(memcg->css.flags & CSS_ONLINE); -} - #define mem_cgroup_from_counter(counter, member) \ container_of(counter, struct mem_cgroup, member) @@ -1403,15 +1168,6 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) return margin; } -int mem_cgroup_swappiness(struct mem_cgroup *memcg) -{ - /* root ? */ - if (mem_cgroup_disabled() || !memcg->css.parent) - return vm_swappiness; - - return memcg->swappiness; -} - /* * A routine for checking "mem" is under move_account() or not. * @@ -1554,6 +1310,12 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, int order) { + struct oom_control oc = { + .zonelist = NULL, + .nodemask = NULL, + .gfp_mask = gfp_mask, + .order = order, + }; struct mem_cgroup *iter; unsigned long chosen_points = 0; unsigned long totalpages; @@ -1572,7 +1334,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, goto unlock; } - check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg); + check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg); totalpages = mem_cgroup_get_limit(memcg) ? 
: 1; for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; @@ -1580,8 +1342,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, css_task_iter_start(&iter->css, &it); while ((task = css_task_iter_next(&it))) { - switch (oom_scan_process_thread(task, totalpages, NULL, - false)) { + switch (oom_scan_process_thread(&oc, task, totalpages)) { case OOM_SCAN_SELECT: if (chosen) put_task_struct(chosen); @@ -1619,8 +1380,8 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, if (chosen) { points = chosen_points * 1000 / totalpages; - oom_kill_process(chosen, gfp_mask, order, points, totalpages, - memcg, NULL, "Memory cgroup out of memory"); + oom_kill_process(&oc, chosen, points, totalpages, memcg, + "Memory cgroup out of memory"); } unlock: mutex_unlock(&oom_lock); @@ -2071,23 +1832,6 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) } EXPORT_SYMBOL(mem_cgroup_end_page_stat); -/** - * mem_cgroup_update_page_stat - update page state statistics - * @memcg: memcg to account against - * @idx: page state item to account - * @val: number of pages (positive or negative) - * - * See mem_cgroup_begin_page_stat() for locking requirements. - */ -void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx, int val) -{ - VM_BUG_ON(!rcu_read_lock_held()); - - if (memcg) - this_cpu_add(memcg->stat->count[idx], val); -} - /* * size of first charge trial. "32" comes from vmscan.c's magic value. * TODO: maybe necessary to use big numbers in big irons. @@ -2364,40 +2108,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) css_put_many(&memcg->css, nr_pages); } -/* - * try_get_mem_cgroup_from_page - look up page's memcg association - * @page: the page - * - * Look up, get a css reference, and return the memcg that owns @page. - * - * The page must be locked to prevent racing with swap-in and page - * cache charges. If coming from an unlocked page table, the caller - * must ensure the page is on the LRU or this can race with charging. - */ -struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) -{ - struct mem_cgroup *memcg; - unsigned short id; - swp_entry_t ent; - - VM_BUG_ON_PAGE(!PageLocked(page), page); - - memcg = page->mem_cgroup; - if (memcg) { - if (!css_tryget_online(&memcg->css)) - memcg = NULL; - } else if (PageSwapCache(page)) { - ent.val = page_private(page); - id = lookup_swap_cgroup_id(ent); - rcu_read_lock(); - memcg = mem_cgroup_from_id(id); - if (memcg && !css_tryget_online(&memcg->css)) - memcg = NULL; - rcu_read_unlock(); - } - return memcg; -} - static void lock_page_lru(struct page *page, int *isolated) { struct zone *zone = page_zone(page); @@ -2513,16 +2223,6 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages) css_put_many(&memcg->css, nr_pages); } -/* - * helper for acessing a memcg's index. It will be used as an index in the - * child cache array in kmem_cache, and also to derive its name. This function - * will return -1 when this is not a kmem-limited memcg. - */ -int memcg_cache_id(struct mem_cgroup *memcg) -{ - return memcg ? 
memcg->kmemcg_id : -1; -} - static int memcg_alloc_cache_id(void) { int id, size; @@ -4041,44 +3741,43 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) /** * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg * @wb: bdi_writeback in question - * @pavail: out parameter for number of available pages + * @pfilepages: out parameter for number of file pages + * @pheadroom: out parameter for number of allocatable pages according to memcg * @pdirty: out parameter for number of dirty pages * @pwriteback: out parameter for number of pages under writeback * - * Determine the numbers of available, dirty, and writeback pages in @wb's - * memcg. Dirty and writeback are self-explanatory. Available is a bit - * more involved. + * Determine the numbers of file, headroom, dirty, and writeback pages in + * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom + * is a bit more involved. * - * A memcg's headroom is "min(max, high) - used". The available memory is - * calculated as the lowest headroom of itself and the ancestors plus the - * number of pages already being used for file pages. Note that this - * doesn't consider the actual amount of available memory in the system. - * The caller should further cap *@pavail accordingly. + * A memcg's headroom is "min(max, high) - used". In the hierarchy, the + * headroom is calculated as the lowest headroom of itself and the + * ancestors. Note that this doesn't consider the actual amount of + * available memory in the system. The caller should further cap + * *@pheadroom accordingly. */ -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback) +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback) { struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *parent; - unsigned long head_room = PAGE_COUNTER_MAX; - unsigned long file_pages; *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); /* this should eventually include NR_UNSTABLE_NFS */ *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); + *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | + (1 << LRU_ACTIVE_FILE)); + *pheadroom = PAGE_COUNTER_MAX; - file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | - (1 << LRU_ACTIVE_FILE)); while ((parent = parent_mem_cgroup(memcg))) { unsigned long ceiling = min(memcg->memory.limit, memcg->high); unsigned long used = page_counter_read(&memcg->memory); - head_room = min(head_room, ceiling - min(ceiling, used)); + *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); memcg = parent; } - - *pavail = file_pages + head_room; } #else /* CONFIG_CGROUP_WRITEBACK */ @@ -4486,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (memcg_wb_domain_init(memcg, GFP_KERNEL)) goto out_free_stat; - spin_lock_init(&memcg->pcp_counter_lock); return memcg; out_free_stat: @@ -5134,10 +4832,12 @@ static void mem_cgroup_clear_mc(void) static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) { - struct task_struct *p = cgroup_taskset_first(tset); - int ret = 0; struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *from; + struct task_struct *p; + struct mm_struct *mm; unsigned long move_flags; + int ret = 0; /* * We are now commited to this value whatever it is. 
Changes in this @@ -5145,36 +4845,37 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, * So we need to save it, and keep it going. */ move_flags = READ_ONCE(memcg->move_charge_at_immigrate); - if (move_flags) { - struct mm_struct *mm; - struct mem_cgroup *from = mem_cgroup_from_task(p); + if (!move_flags) + return 0; - VM_BUG_ON(from == memcg); + p = cgroup_taskset_first(tset); + from = mem_cgroup_from_task(p); - mm = get_task_mm(p); - if (!mm) - return 0; - /* We move charges only when we move a owner of the mm */ - if (mm->owner == p) { - VM_BUG_ON(mc.from); - VM_BUG_ON(mc.to); - VM_BUG_ON(mc.precharge); - VM_BUG_ON(mc.moved_charge); - VM_BUG_ON(mc.moved_swap); - - spin_lock(&mc.lock); - mc.from = from; - mc.to = memcg; - mc.flags = move_flags; - spin_unlock(&mc.lock); - /* We set mc.moving_task later */ - - ret = mem_cgroup_precharge_mc(mm); - if (ret) - mem_cgroup_clear_mc(); - } - mmput(mm); + VM_BUG_ON(from == memcg); + + mm = get_task_mm(p); + if (!mm) + return 0; + /* We move charges only when we move a owner of the mm */ + if (mm->owner == p) { + VM_BUG_ON(mc.from); + VM_BUG_ON(mc.to); + VM_BUG_ON(mc.precharge); + VM_BUG_ON(mc.moved_charge); + VM_BUG_ON(mc.moved_swap); + + spin_lock(&mc.lock); + mc.from = from; + mc.to = memcg; + mc.flags = move_flags; + spin_unlock(&mc.lock); + /* We set mc.moving_task later */ + + ret = mem_cgroup_precharge_mc(mm); + if (ret) + mem_cgroup_clear_mc(); } + mmput(mm); return ret; } @@ -5528,19 +5229,6 @@ struct cgroup_subsys memory_cgrp_subsys = { }; /** - * mem_cgroup_events - count memory events against a cgroup - * @memcg: the memory cgroup - * @idx: the event index - * @nr: the number of events to account for - */ -void mem_cgroup_events(struct mem_cgroup *memcg, - enum mem_cgroup_events_index idx, - unsigned int nr) -{ - this_cpu_add(memcg->stat->events[idx], nr); -} - -/** * mem_cgroup_low - check if memory consumption is below the normal range * @root: the highest ancestor to consider * @memcg: the memory cgroup to check @@ -5612,8 +5300,20 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, * the page lock, which serializes swap cache removal, which * in turn serializes uncharging. */ + VM_BUG_ON_PAGE(!PageLocked(page), page); if (page->mem_cgroup) goto out; + + if (do_swap_account) { + swp_entry_t ent = { .val = page_private(page), }; + unsigned short id = lookup_swap_cgroup_id(ent); + + rcu_read_lock(); + memcg = mem_cgroup_from_id(id); + if (memcg && !css_tryget_online(&memcg->css)) + memcg = NULL; + rcu_read_unlock(); + } } if (PageTransHuge(page)) { @@ -5621,8 +5321,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, VM_BUG_ON_PAGE(!PageTransHuge(page), page); } - if (do_swap_account && PageSwapCache(page)) - memcg = try_get_mem_cgroup_from_page(page); if (!memcg) memcg = get_mem_cgroup_from_mm(mm); @@ -5972,7 +5670,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); - /* Caller disabled preemption with mapping->tree_lock */ + /* + * Interrupts should be disabled here because the caller holds the + * mapping->tree_lock lock which is taken with interrupts-off. It is + * important here to have the interrupts disabled because it is the + * only synchronisation we have for udpating the per-CPU variables. 
+ */ + VM_BUG_ON(!irqs_disabled()); mem_cgroup_charge_statistics(memcg, page, -1); memcg_check_events(memcg, page); } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 1f4446a90..95882692e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -130,27 +130,15 @@ static int hwpoison_filter_flags(struct page *p) * can only guarantee that the page either belongs to the memcg tasks, or is * a freed page. */ -#ifdef CONFIG_MEMCG_SWAP +#ifdef CONFIG_MEMCG u64 hwpoison_filter_memcg; EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); static int hwpoison_filter_task(struct page *p) { - struct mem_cgroup *mem; - struct cgroup_subsys_state *css; - unsigned long ino; - if (!hwpoison_filter_memcg) return 0; - mem = try_get_mem_cgroup_from_page(p); - if (!mem) - return -EINVAL; - - css = mem_cgroup_css(mem); - ino = cgroup_ino(css->cgroup); - css_put(css); - - if (ino != hwpoison_filter_memcg) + if (page_cgroup_ino(p) != hwpoison_filter_memcg) return -EINVAL; return 0; @@ -934,6 +922,27 @@ int get_hwpoison_page(struct page *page) } EXPORT_SYMBOL_GPL(get_hwpoison_page); +/** + * put_hwpoison_page() - Put refcount for memory error handling: + * @page: raw error page (hit by memory error) + */ +void put_hwpoison_page(struct page *page) +{ + struct page *head = compound_head(page); + + if (PageHuge(head)) { + put_page(head); + return; + } + + if (PageTransHuge(head)) + if (page != head) + put_page(head); + + put_page(page); +} +EXPORT_SYMBOL_GPL(put_hwpoison_page); + /* * Do all that is necessary to remove user space mappings. Unmap * the pages and send SIGBUS to the processes if the data was dirty. @@ -1100,7 +1109,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) nr_pages = 1 << compound_order(hpage); else /* normal page or thp */ nr_pages = 1; - atomic_long_add(nr_pages, &num_poisoned_pages); + num_poisoned_pages_add(nr_pages); /* * We need/can do nothing about count=0 pages. 
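put_hwpoison_page() is the release counterpart of get_hwpoison_page(), and the many put_page() -> put_hwpoison_page() conversions that follow simply restore that pairing so head and tail references are dropped consistently. An illustrative shape of the pairing (not any specific call site), assuming get_hwpoison_page() returns non-zero when it managed to take a reference:

	if (!get_hwpoison_page(p))
		return 0;		/* no reference taken, nothing to drop */

	/* ... examine or poison the page ... */

	put_hwpoison_page(p);		/* drops the reference(s) the matching get took */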
@@ -1128,7 +1137,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (PageHWPoison(hpage)) { if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) || (p != hpage && TestSetPageHWPoison(hpage))) { - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); unlock_page(hpage); return 0; } @@ -1152,10 +1161,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags) else pr_err("MCE: %#lx: thp split failed\n", pfn); if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); - put_page(p); - if (p != hpage) - put_page(hpage); + num_poisoned_pages_sub(nr_pages); + put_hwpoison_page(p); return -EBUSY; } VM_BUG_ON_PAGE(!page_count(p), p); @@ -1214,16 +1221,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags) */ if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); return 0; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); return 0; } @@ -1237,7 +1244,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) { action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED); unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); return 0; } /* @@ -1426,6 +1433,22 @@ int unpoison_memory(unsigned long pfn) return 0; } + if (page_count(page) > 1) { + pr_info("MCE: Someone grabs the hwpoison page %#lx\n", pfn); + return 0; + } + + if (page_mapped(page)) { + pr_info("MCE: Someone maps the hwpoison page %#lx\n", pfn); + return 0; + } + + if (page_mapping(page)) { + pr_info("MCE: the hwpoison page has non-NULL mapping %#lx\n", + pfn); + return 0; + } + /* * unpoison_memory() can encounter thp only when the thp is being * worked by memory_failure() and the page lock is not held yet. @@ -1450,7 +1473,7 @@ int unpoison_memory(unsigned long pfn) return 0; } if (TestClearPageHWPoison(p)) - atomic_long_dec(&num_poisoned_pages); + num_poisoned_pages_dec(); pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); return 0; } @@ -1464,16 +1487,16 @@ int unpoison_memory(unsigned long pfn) */ if (TestClearPageHWPoison(page)) { pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); - atomic_long_sub(nr_pages, &num_poisoned_pages); + num_poisoned_pages_sub(nr_pages); freeit = 1; if (PageHuge(page)) clear_page_hwpoison_huge_page(page); } unlock_page(page); - put_page(page); + put_hwpoison_page(page); if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) - put_page(page); + put_hwpoison_page(page); return 0; } @@ -1486,7 +1509,7 @@ static struct page *new_page(struct page *p, unsigned long private, int **x) return alloc_huge_page_node(page_hstate(compound_head(p)), nid); else - return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); + return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0); } /* @@ -1533,7 +1556,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) /* * Try to free it. 
*/ - put_page(page); + put_hwpoison_page(page); shake_page(page, 1); /* @@ -1542,7 +1565,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) ret = __get_any_page(page, pfn, 0); if (!PageLRU(page)) { /* Drop page reference which is from __get_any_page() */ - put_page(page); + put_hwpoison_page(page); pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", pfn, page->flags); return -EIO; @@ -1565,7 +1588,7 @@ static int soft_offline_huge_page(struct page *page, int flags) lock_page(hpage); if (PageHWPoison(hpage)) { unlock_page(hpage); - put_page(hpage); + put_hwpoison_page(hpage); pr_info("soft offline: %#lx hugepage already poisoned\n", pfn); return -EBUSY; } @@ -1576,7 +1599,7 @@ static int soft_offline_huge_page(struct page *page, int flags) * get_any_page() and isolate_huge_page() takes a refcount each, * so need to drop one here. */ - put_page(hpage); + put_hwpoison_page(hpage); if (!ret) { pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); return -EBUSY; @@ -1600,11 +1623,10 @@ static int soft_offline_huge_page(struct page *page, int flags) if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), - &num_poisoned_pages); + num_poisoned_pages_add(1 << compound_order(hpage)); } else { SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_inc(); } } return ret; @@ -1625,7 +1647,7 @@ static int __soft_offline_page(struct page *page, int flags) wait_on_page_writeback(page); if (PageHWPoison(page)) { unlock_page(page); - put_page(page); + put_hwpoison_page(page); pr_info("soft offline: %#lx page already poisoned\n", pfn); return -EBUSY; } @@ -1640,10 +1662,10 @@ static int __soft_offline_page(struct page *page, int flags) * would need to fix isolation locking first. */ if (ret == 1) { - put_page(page); + put_hwpoison_page(page); pr_info("soft_offline: %#lx: invalidated\n", pfn); SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_inc(); return 0; } @@ -1657,14 +1679,12 @@ static int __soft_offline_page(struct page *page, int flags) * Drop page reference which is came from get_any_page() * successful isolate_lru_page() already took another one. 
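All of the atomic_long_add()/sub()/inc()/dec() updates of num_poisoned_pages in this file are replaced by num_poisoned_pages_{add,sub,inc,dec}() calls. Their definitions are not part of this diff; presumably they are thin inline wrappers over the same counter, along the lines of:

	/* sketch only -- the real wrappers live in a header outside this diff */
	extern atomic_long_t num_poisoned_pages;

	static inline void num_poisoned_pages_inc(void)
	{
		atomic_long_inc(&num_poisoned_pages);
	}

	static inline void num_poisoned_pages_add(long num)
	{
		atomic_long_add(num, &num_poisoned_pages);
	}

	static inline void num_poisoned_pages_sub(long num)
	{
		atomic_long_sub(num, &num_poisoned_pages);
	}

Keeping the counter behind accessors means later changes to how the count is maintained do not have to touch every call site.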
*/ - put_page(page); + put_hwpoison_page(page); if (!ret) { LIST_HEAD(pagelist); inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); list_add(&page->lru, &pagelist); - if (!TestSetPageHWPoison(page)) - atomic_long_inc(&num_poisoned_pages); ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { @@ -1679,8 +1699,6 @@ static int __soft_offline_page(struct page *page, int flags) pfn, ret, page->flags); if (ret > 0) ret = -EIO; - if (TestClearPageHWPoison(page)) - atomic_long_dec(&num_poisoned_pages); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", @@ -1719,12 +1737,16 @@ int soft_offline_page(struct page *page, int flags) if (PageHWPoison(page)) { pr_info("soft offline: %#lx page already poisoned\n", pfn); + if (flags & MF_COUNT_INCREASED) + put_hwpoison_page(page); return -EBUSY; } if (!PageHuge(page) && PageTransHuge(hpage)) { if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { pr_info("soft offline: %#lx: failed to split THP\n", pfn); + if (flags & MF_COUNT_INCREASED) + put_hwpoison_page(page); return -EBUSY; } } @@ -1742,11 +1764,10 @@ int soft_offline_page(struct page *page, int flags) if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); if (!dequeue_hwpoisoned_huge_page(hpage)) - atomic_long_add(1 << compound_order(hpage), - &num_poisoned_pages); + num_poisoned_pages_add(1 << compound_order(hpage)); } else { if (!TestSetPageHWPoison(page)) - atomic_long_inc(&num_poisoned_pages); + num_poisoned_pages_inc(); } } return ret; diff --git a/mm/memory.c b/mm/memory.c index 106ae6439..96d64ceb6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -61,6 +61,7 @@ #include <linux/string.h> #include <linux/dma-debug.h> #include <linux/debugfs.h> +#include <linux/userfaultfd_k.h> #include <asm/io.h> #include <asm/pgalloc.h> @@ -203,22 +204,22 @@ static void check_sync_rss_stat(struct task_struct *task) #ifdef HAVE_GENERIC_MMU_GATHER -static int tlb_next_batch(struct mmu_gather *tlb) +static bool tlb_next_batch(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; batch = tlb->active; if (batch->next) { tlb->active = batch->next; - return 1; + return true; } if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) - return 0; + return false; batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); if (!batch) - return 0; + return false; tlb->batch_count++; batch->next = NULL; @@ -228,7 +229,7 @@ static int tlb_next_batch(struct mmu_gather *tlb) tlb->active->next = batch; tlb->active = batch; - return 1; + return true; } /* tlb_gather_mmu @@ -2720,6 +2721,12 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto unlock; + /* Deliver the page fault to userland, check inside PT lock */ + if (userfaultfd_missing(vma)) { + pte_unmap_unlock(page_table, ptl); + return handle_userfault(vma, address, flags, + VM_UFFD_MISSING); + } goto setpte; } @@ -2748,6 +2755,15 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_none(*page_table)) goto release; + /* Deliver the page fault to userland, check inside PT lock */ + if (userfaultfd_missing(vma)) { + pte_unmap_unlock(page_table, ptl); + mem_cgroup_cancel_charge(page, memcg); + page_cache_release(page); + return handle_userfault(vma, address, flags, + VM_UFFD_MISSING); + } + inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); mem_cgroup_commit_charge(page, 
memcg, false); @@ -3034,9 +3050,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else { /* * The fault handler has no page to lock, so it holds - * i_mmap_lock for read to protect against truncate. + * i_mmap_lock for write to protect against truncate. */ - i_mmap_unlock_read(vma->vm_file->f_mapping); + i_mmap_unlock_write(vma->vm_file->f_mapping); } goto uncharge_out; } @@ -3050,9 +3066,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else { /* * The fault handler has no page to lock, so it holds - * i_mmap_lock for read to protect against truncate. + * i_mmap_lock for write to protect against truncate. */ - i_mmap_unlock_read(vma->vm_file->f_mapping); + i_mmap_unlock_write(vma->vm_file->f_mapping); } return ret; uncharge_out: @@ -3251,6 +3267,27 @@ out: return 0; } +static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, unsigned int flags) +{ + if (vma_is_anonymous(vma)) + return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); + if (vma->vm_ops->pmd_fault) + return vma->vm_ops->pmd_fault(vma, address, pmd, flags); + return VM_FAULT_FALLBACK; +} + +static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, pmd_t orig_pmd, + unsigned int flags) +{ + if (vma_is_anonymous(vma)) + return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); + if (vma->vm_ops->pmd_fault) + return vma->vm_ops->pmd_fault(vma, address, pmd, flags); + return VM_FAULT_FALLBACK; +} + /* * These routines also need to handle stuff like marking pages dirty * and/or accessed for architectures that don't do it in hardware (most @@ -3286,12 +3323,12 @@ static int handle_pte_fault(struct mm_struct *mm, barrier(); if (!pte_present(entry)) { if (pte_none(entry)) { - if (vma->vm_ops) + if (vma_is_anonymous(vma)) + return do_anonymous_page(mm, vma, address, + pte, pmd, flags); + else return do_fault(mm, vma, address, pte, pmd, flags, entry); - - return do_anonymous_page(mm, vma, address, pte, pmd, - flags); } return do_swap_page(mm, vma, address, pte, pmd, flags, entry); @@ -3353,10 +3390,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (!pmd) return VM_FAULT_OOM; if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { - int ret = VM_FAULT_FALLBACK; - if (!vma->vm_ops) - ret = do_huge_pmd_anonymous_page(mm, vma, address, - pmd, flags); + int ret = create_huge_pmd(mm, vma, address, pmd, flags); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { @@ -3380,8 +3414,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, orig_pmd, pmd); if (dirty && !pmd_write(orig_pmd)) { - ret = do_huge_pmd_wp_page(mm, vma, address, pmd, - orig_pmd); + ret = wp_huge_pmd(mm, vma, address, pmd, + orig_pmd, flags); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8fd97dac5..aa992e2df 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -778,7 +778,10 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, start = phys_start_pfn << PAGE_SHIFT; size = nr_pages * PAGE_SIZE; - ret = release_mem_region_adjustable(&iomem_resource, start, size); + + /* in the ZONE_DEVICE case device driver owns the memory region */ + if (!is_dev_zone(zone)) + ret = release_mem_region_adjustable(&iomem_resource, start, size); if (ret) { resource_size_t endres = start + size - 1; @@ -1215,8 +1218,13 @@ static int 
should_add_memory_movable(int nid, u64 start, u64 size) return 0; } -int zone_for_memory(int nid, u64 start, u64 size, int zone_default) +int zone_for_memory(int nid, u64 start, u64 size, int zone_default, + bool for_device) { +#ifdef CONFIG_ZONE_DEVICE + if (for_device) + return ZONE_DEVICE; +#endif if (should_add_memory_movable(nid, start, size)) return ZONE_MOVABLE; @@ -1265,7 +1273,7 @@ int __ref add_memory(int nid, u64 start, u64 size) } /* call arch's memory hotadd */ - ret = arch_add_memory(nid, start, size); + ret = arch_add_memory(nid, start, size, false); if (ret < 0) goto error; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 99d4c1d0b..87a177917 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -608,9 +608,6 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, qp->prev = vma; - if (vma->vm_flags & VM_PFNMAP) - return 1; - if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) @@ -722,8 +719,8 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, - new_pol); + vma->anon_vma, vma->vm_file, pgoff, + new_pol, vma->vm_userfaultfd_ctx); if (prev) { vma = prev; next = vma->vm_next; @@ -945,7 +942,7 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x return alloc_huge_page_node(page_hstate(compound_head(page)), node); else - return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE | + return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } @@ -2001,7 +1998,7 @@ retry_cpuset: nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); - page = alloc_pages_exact_node(hpage_node, + page = __alloc_pages_node(hpage_node, gfp | __GFP_THISNODE, order); goto out; } diff --git a/mm/mempool.c b/mm/mempool.c index 2cc08de8b..4c533bc51 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -150,6 +150,9 @@ static void *remove_element(mempool_t *pool) */ void mempool_destroy(mempool_t *pool) { + if (unlikely(!pool)) + return; + while (pool->curr_nr) { void *element = remove_element(pool); pool->free(element, pool->pool_data); diff --git a/mm/memtest.c b/mm/memtest.c index 0a1cc133f..8eaa4c3a5 100644 --- a/mm/memtest.c +++ b/mm/memtest.c @@ -1,11 +1,6 @@ #include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> #include <linux/types.h> -#include <linux/mm.h> -#include <linux/smp.h> #include <linux/init.h> -#include <linux/pfn.h> #include <linux/memblock.h> static u64 patterns[] __initdata = { @@ -31,10 +26,8 @@ static u64 patterns[] __initdata = { static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad) { - printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n", - (unsigned long long) pattern, - (unsigned long long) start_bad, - (unsigned long long) end_bad); + pr_info(" %016llx bad mem addr %pa - %pa reserved\n", + cpu_to_be64(pattern), &start_bad, &end_bad); memblock_reserve(start_bad, end_bad - start_bad); } @@ -79,26 +72,26 @@ static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) this_start = clamp(this_start, start, end); this_end = clamp(this_end, start, end); if (this_start < this_end) { - printk(KERN_INFO " %010llx - %010llx pattern %016llx\n", - (unsigned long long)this_start, - (unsigned long long)this_end, - (unsigned long 
long)cpu_to_be64(pattern)); + pr_info(" %pa - %pa pattern %016llx\n", + &this_start, &this_end, cpu_to_be64(pattern)); memtest(pattern, this_start, this_end - this_start); } } } /* default is disabled */ -static int memtest_pattern __initdata; +static unsigned int memtest_pattern __initdata; static int __init parse_memtest(char *arg) { + int ret = 0; + if (arg) - memtest_pattern = simple_strtoul(arg, NULL, 0); + ret = kstrtouint(arg, 0, &memtest_pattern); else memtest_pattern = ARRAY_SIZE(patterns); - return 0; + return ret; } early_param("memtest", parse_memtest); @@ -111,7 +104,7 @@ void __init early_memtest(phys_addr_t start, phys_addr_t end) if (!memtest_pattern) return; - printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); + pr_info("early_memtest: # of tests: %u\n", memtest_pattern); for (i = memtest_pattern-1; i < UINT_MAX; --i) { idx = i % ARRAY_SIZE(patterns); do_one_pass(patterns[idx], start, end); diff --git a/mm/migrate.c b/mm/migrate.c index fcb6204de..842ecd7aa 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -37,6 +37,7 @@ #include <linux/gfp.h> #include <linux/balloon_compaction.h> #include <linux/mmu_notifier.h> +#include <linux/page_idle.h> #include <asm/tlbflush.h> @@ -524,6 +525,11 @@ void migrate_page_copy(struct page *newpage, struct page *page) __set_page_dirty_nobuffers(newpage); } + if (page_is_young(page)) + set_page_young(newpage); + if (page_is_idle(page)) + set_page_idle(newpage); + /* * Copy NUMA information to the new page, to prevent over-eager * future migrations of this same page. @@ -890,8 +896,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, /* Establish migration ptes or remove ptes */ if (page_mapped(page)) { try_to_unmap(page, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| - TTU_IGNORE_HWPOISON); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); page_was_mapped = 1; } @@ -962,9 +967,11 @@ out: dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); /* Soft-offlined page shouldn't go through lru cache list */ - if (reason == MR_MEMORY_FAILURE) + if (reason == MR_MEMORY_FAILURE) { put_page(page); - else + if (!test_set_page_hwpoison(page)) + num_poisoned_pages_inc(); + } else putback_lru_page(page); } @@ -1204,7 +1211,7 @@ static struct page *new_page_node(struct page *p, unsigned long private, return alloc_huge_page_node(page_hstate(compound_head(p)), pm->node); else - return alloc_pages_exact_node(pm->node, + return __alloc_pages_node(pm->node, GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } @@ -1236,7 +1243,9 @@ static int do_move_page_to_node_array(struct mm_struct *mm, if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) goto set_status; - page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); + /* FOLL_DUMP to ignore special (like zero) pages */ + page = follow_page(vma, pp->addr, + FOLL_GET | FOLL_SPLIT | FOLL_DUMP); err = PTR_ERR(page); if (IS_ERR(page)) @@ -1246,10 +1255,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm, if (!page) goto set_status; - /* Use PageReserved to check for zero page */ - if (PageReserved(page)) - goto put_and_set; - pp->page = page; err = page_to_nid(page); @@ -1406,18 +1411,14 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, if (!vma || addr < vma->vm_start) goto set_status; - page = follow_page(vma, addr, 0); + /* FOLL_DUMP to ignore special (like zero) pages */ + page = follow_page(vma, addr, FOLL_DUMP); err = PTR_ERR(page); if (IS_ERR(page)) goto set_status; - err = -ENOENT; - /* Use PageReserved 
to check for zero page */ - if (!page || PageReserved(page)) - goto set_status; - - err = page_to_nid(page); + err = page ? page_to_nid(page) : -ENOENT; set_status: *status = err; @@ -1570,7 +1571,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page, int nid = (int) data; struct page *newpage; - newpage = alloc_pages_exact_node(nid, + newpage = __alloc_pages_node(nid, (GFP_HIGHUSER_MOVABLE | __GFP_THISNODE | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & diff --git a/mm/mlock.c b/mm/mlock.c index 6fd2cf15e..259366800 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -510,7 +510,8 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, - vma->vm_file, pgoff, vma_policy(vma)); + vma->vm_file, pgoff, vma_policy(vma), + vma->vm_userfaultfd_ctx); if (*prev) { vma = *prev; goto success; @@ -41,6 +41,7 @@ #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> +#include <linux/userfaultfd_k.h> #include <linux/ksm.h> #include <asm/uaccess.h> @@ -935,7 +936,8 @@ again: remove_next = 1 + (end > next->vm_end); * per-vma resources, so we don't attempt to merge those. */ static inline int is_mergeable_vma(struct vm_area_struct *vma, - struct file *file, unsigned long vm_flags) + struct file *file, unsigned long vm_flags, + struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { /* * VM_SOFTDIRTY should not prevent from VMA merging, if we @@ -951,6 +953,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma, return 0; if (vma->vm_ops && vma->vm_ops->close) return 0; + if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) + return 0; return 1; } @@ -981,9 +985,11 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, */ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + struct anon_vma *anon_vma, struct file *file, + pgoff_t vm_pgoff, + struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { - if (is_mergeable_vma(vma, file, vm_flags) && + if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { if (vma->vm_pgoff == vm_pgoff) return 1; @@ -1000,9 +1006,11 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, */ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, - struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + struct anon_vma *anon_vma, struct file *file, + pgoff_t vm_pgoff, + struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { - if (is_mergeable_vma(vma, file, vm_flags) && + if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { pgoff_t vm_pglen; vm_pglen = vma_pages(vma); @@ -1045,7 +1053,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, - pgoff_t pgoff, struct mempolicy *policy) + pgoff_t pgoff, struct mempolicy *policy, + struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; @@ -1072,14 +1081,17 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, - anon_vma, 
file, pgoff)) { + anon_vma, file, pgoff, + vm_userfaultfd_ctx)) { /* * OK, it can. Can we now merge in the successor as well? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, - anon_vma, file, pgoff+pglen) && + anon_vma, file, + pgoff+pglen, + vm_userfaultfd_ctx) && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { /* cases 1, 6 */ @@ -1100,7 +1112,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, - anon_vma, file, pgoff+pglen)) { + anon_vma, file, pgoff+pglen, + vm_userfaultfd_ctx)) { if (prev && addr < prev->vm_end) /* case 4 */ err = vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL); @@ -1263,14 +1276,12 @@ static inline int mlock_future_check(struct mm_struct *mm, /* * The caller must hold down_write(¤t->mm->mmap_sem). */ - -unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, +unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, unsigned long pgoff, - unsigned long *populate) + unsigned long flags, vm_flags_t vm_flags, + unsigned long pgoff, unsigned long *populate) { struct mm_struct *mm = current->mm; - vm_flags_t vm_flags; *populate = 0; @@ -1284,7 +1295,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) - if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) + if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; if (!(flags & MAP_FIXED)) @@ -1314,7 +1325,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ - vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + vm_flags |= calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; /* If uksm is enabled, we add VM_MERGABLE to new VMAs. */ @@ -1356,7 +1367,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; - if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { + if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; @@ -1498,13 +1509,14 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) int vma_wants_writenotify(struct vm_area_struct *vma) { vm_flags_t vm_flags = vma->vm_flags; + const struct vm_operations_struct *vm_ops = vma->vm_ops; /* If it was private or non-writable, the write bit is already clear */ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) return 0; /* The backer wishes to know when pages are first written to? */ - if (vma->vm_ops && vma->vm_ops->page_mkwrite) + if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) return 1; /* The open routine did something to the protections that pgprot_modify @@ -1589,8 +1601,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, /* * Can we just expand an old mapping? 
*/ - vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, - NULL); + vma = vma_merge(mm, prev, addr, addr + len, vm_flags, + NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; @@ -2463,7 +2475,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vm_area_struct *new; - int err = -ENOMEM; + int err; if (is_vm_hugetlb_page(vma) && (addr & ~(huge_page_mask(hstate_vma(vma))))) @@ -2471,7 +2483,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) - goto out_err; + return -ENOMEM; /* most fields are the same, copy all, and then fixup */ *new = *vma; @@ -2521,7 +2533,6 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, mpol_put(vma_policy(new)); out_free_vma: kmem_cache_free(vm_area_cachep, new); - out_err: return err; } @@ -2780,7 +2791,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) /* Can we just expand an old private anonymous mapping? */ vma = vma_merge(mm, prev, addr, addr + len, flags, - NULL, NULL, pgoff, NULL); + NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX); if (vma) goto out; @@ -2894,6 +2905,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) struct vm_area_struct *prev; struct rb_node **rb_link, *rb_parent; + if (find_vma_links(mm, vma->vm_start, vma->vm_end, + &prev, &rb_link, &rb_parent)) + return -ENOMEM; + if ((vma->vm_flags & VM_ACCOUNT) && + security_vm_enough_memory_mm(mm, vma_pages(vma))) + return -ENOMEM; + /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index @@ -2906,16 +2924,10 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) * using the existing file pgoff checks and manipulations. * Similarly in do_mmap_pgoff and in do_brk. */ - if (!vma->vm_file) { + if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } - if (find_vma_links(mm, vma->vm_start, vma->vm_end, - &prev, &rb_link, &rb_parent)) - return -ENOMEM; - if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory_mm(mm, vma_pages(vma))) - return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); return 0; @@ -2940,7 +2952,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
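A recurring change across the mmap.c, mlock.c, mprotect.c and mempolicy.c hunks is that vma_merge() and the is_mergeable_*() helpers now also receive a struct vm_userfaultfd_ctx, so VMAs registered with different userfaultfd contexts are never merged. Callers that adjust an existing VMA forward that VMA's own vma->vm_userfaultfd_ctx, while callers creating a fresh mapping (mmap_region(), do_brk()) pass NULL_VM_UFFD_CTX. The is_mergeable_vm_userfaultfd_ctx() helper used above is defined elsewhere; assuming the context struct wraps a single ctx pointer, it presumably amounts to:

	static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
			struct vm_userfaultfd_ctx vm_ctx)
	{
		return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
	}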
*/ - if (unlikely(!vma->vm_file && !vma->anon_vma)) { + if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } @@ -2948,7 +2960,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); + vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), + vma->vm_userfaultfd_ctx); if (new_vma) { /* * Source vma may have been merged into new_vma @@ -2973,31 +2986,32 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); - if (new_vma) { - *new_vma = *vma; - new_vma->vm_start = addr; - new_vma->vm_end = addr + len; - new_vma->vm_pgoff = pgoff; - if (vma_dup_policy(vma, new_vma)) - goto out_free_vma; - INIT_LIST_HEAD(&new_vma->anon_vma_chain); - if (anon_vma_clone(new_vma, vma)) - goto out_free_mempol; - if (new_vma->vm_file) - vma_get_file(new_vma); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); - vma_link(mm, new_vma, prev, rb_link, rb_parent); - *need_rmap_locks = false; - uksm_vma_add_new(new_vma); - } + if (!new_vma) + goto out; + *new_vma = *vma; + new_vma->vm_start = addr; + new_vma->vm_end = addr + len; + new_vma->vm_pgoff = pgoff; + if (vma_dup_policy(vma, new_vma)) + goto out_free_vma; + INIT_LIST_HEAD(&new_vma->anon_vma_chain); + if (anon_vma_clone(new_vma, vma)) + goto out_free_mempol; + if (new_vma->vm_file) + vma_get_file(new_vma); + if (new_vma->vm_ops && new_vma->vm_ops->open) + new_vma->vm_ops->open(new_vma); + vma_link(mm, new_vma, prev, rb_link, rb_parent); + *need_rmap_locks = false; + uksm_vma_add_new(new_vma); } return new_vma; - out_free_mempol: +out_free_mempol: mpol_put(vma_policy(new_vma)); - out_free_vma: +out_free_vma: kmem_cache_free(vm_area_cachep, new_vma); +out: return NULL; } @@ -3049,21 +3063,13 @@ static int special_mapping_fault(struct vm_area_struct *vma, pgoff_t pgoff; struct page **pages; - /* - * special mappings have no vm_file, and in that case, the mm - * uses vm_pgoff internally. So we have to subtract it from here. - * We are allowed to do this because we are the mm; do not copy - * this code into drivers! 
- */ - pgoff = vmf->pgoff - vma->vm_pgoff; - if (vma->vm_ops == &legacy_special_mapping_vmops) pages = vma->vm_private_data; else pages = ((struct vm_special_mapping *)vma->vm_private_data)-> pages; - for (; pgoff && *pages; ++pages) + for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 3b9b3d074..5fbdd367b 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -123,6 +123,23 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, return young; } +int __mmu_notifier_clear_young(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct mmu_notifier *mn; + int young = 0, id; + + id = srcu_read_lock(&srcu); + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { + if (mn->ops->clear_young) + young |= mn->ops->clear_young(mn, mm, start, end); + } + srcu_read_unlock(&srcu, id); + + return young; +} + int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address) { diff --git a/mm/mprotect.c b/mm/mprotect.c index e7d6f1171..ef5be8eaa 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -292,7 +292,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, */ pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *pprev = vma_merge(mm, *pprev, start, end, newflags, - vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); + vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), + vma->vm_userfaultfd_ctx); if (*pprev) { vma = *pprev; goto success; diff --git a/mm/mremap.c b/mm/mremap.c index a7c93eceb..5a71cce8c 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -276,6 +276,12 @@ static unsigned long move_vma(struct vm_area_struct *vma, moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, need_rmap_locks); if (moved_len < old_len) { + err = -ENOMEM; + } else if (vma->vm_ops && vma->vm_ops->mremap) { + err = vma->vm_ops->mremap(new_vma); + } + + if (unlikely(err)) { /* * On error, move entries back from new area to old, * which will succeed since page tables still there, @@ -286,16 +292,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, vma = new_vma; old_len = new_len; old_addr = new_addr; - new_addr = -ENOMEM; + new_addr = err; } else { - if (vma->vm_file && vma->vm_file->f_op->mremap) { - err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma); - if (err < 0) { - move_page_tables(new_vma, new_addr, vma, - old_addr, moved_len, true); - return err; - } - } arch_remap(mm, old_addr, old_addr + old_len, new_addr, new_addr + new_len); } @@ -348,6 +346,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = find_vma(mm, addr); + unsigned long pgoff; if (!vma || vma->vm_start > addr) return ERR_PTR(-EFAULT); @@ -359,17 +358,17 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (old_len > vma->vm_end - addr) return ERR_PTR(-EFAULT); + if (new_len == old_len) + return vma; + /* Need to be careful about a growing mapping */ - if (new_len > old_len) { - unsigned long pgoff; - - if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) - return ERR_PTR(-EFAULT); - pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; - pgoff += vma->vm_pgoff; - if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) - return ERR_PTR(-EINVAL); - } + pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; + pgoff += vma->vm_pgoff; + if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) + return ERR_PTR(-EINVAL); + + if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) + return ERR_PTR(-EFAULT); if 
(vma->vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; @@ -408,13 +407,8 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) goto out; - /* Check if the location we're moving into overlaps the - * old location at all, and fail if it does. - */ - if ((new_addr <= addr) && (new_addr+new_len) > addr) - goto out; - - if ((addr <= new_addr) && (addr+old_len) > new_addr) + /* Ensure the old/new locations do not overlap */ + if (addr + old_len > new_addr && new_addr + new_len > addr) goto out; ret = do_munmap(mm, new_addr, new_len); @@ -580,8 +574,10 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); } out: - if (ret & ~PAGE_MASK) + if (ret & ~PAGE_MASK) { vm_unacct_memory(charged); + locked = 0; + } up_write(¤t->mm->mmap_sem); if (locked && new_len > old_len) mm_populate(new_addr + old_len, new_len - old_len); diff --git a/mm/nommu.c b/mm/nommu.c index f937b7e52..fffc566b3 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -324,12 +324,12 @@ long vwrite(char *buf, char *addr, unsigned long count) } /* - * vmalloc - allocate virtually continguos memory + * vmalloc - allocate virtually contiguous memory * * @size: allocation size * * Allocate enough pages to cover @size from the page level - * allocator and map them into continguos kernel virtual space. + * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. @@ -341,12 +341,12 @@ void *vmalloc(unsigned long size) EXPORT_SYMBOL(vmalloc); /* - * vzalloc - allocate virtually continguos memory with zero fill + * vzalloc - allocate virtually contiguous memory with zero fill * * @size: allocation size * * Allocate enough pages to cover @size from the page level - * allocator and map them into continguos kernel virtual space. + * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * For tight control over page level allocator and protection flags @@ -420,7 +420,7 @@ void *vmalloc_exec(unsigned long size) * @size: allocation size * * Allocate enough 32bit PA addressable pages to cover @size from the - * page level allocator and map them into continguos kernel virtual space. + * page level allocator and map them into contiguous kernel virtual space. 
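The mremap_to() hunk above folds the two separate checks into the canonical interval-overlap test: the half-open ranges [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each one starts below the other's end, which is what the single condition expresses. As a freestanding sketch (illustrative helper name):

	static inline bool ranges_overlap(unsigned long a_start, unsigned long a_len,
					  unsigned long b_start, unsigned long b_len)
	{
		/* overlap iff each range starts before the other one ends */
		return a_start + a_len > b_start && b_start + b_len > a_start;
	}

mremap() rejects the request when the old and new locations overlap in this sense, matching what the removed pair of conditionals tested case by case.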
*/ void *vmalloc_32(unsigned long size) { @@ -1035,7 +1035,7 @@ static int validate_mmap_request(struct file *file, /* handle executable mappings and implied executable * mappings */ - if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { + if (path_noexec(&file->f_path)) { if (prot & PROT_EXEC) return -EPERM; } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { @@ -1233,18 +1233,19 @@ enomem: /* * handle mapping creation for uClinux */ -unsigned long do_mmap_pgoff(struct file *file, - unsigned long addr, - unsigned long len, - unsigned long prot, - unsigned long flags, - unsigned long pgoff, - unsigned long *populate) +unsigned long do_mmap(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long prot, + unsigned long flags, + vm_flags_t vm_flags, + unsigned long pgoff, + unsigned long *populate) { struct vm_area_struct *vma; struct vm_region *region; struct rb_node *rb; - unsigned long capabilities, vm_flags, result; + unsigned long capabilities, result; int ret; *populate = 0; @@ -1262,7 +1263,7 @@ unsigned long do_mmap_pgoff(struct file *file, /* we've determined that we can make the mapping, now translate what we * now know into VMA flags */ - vm_flags = determine_vm_flags(file, prot, flags, capabilities); + vm_flags |= determine_vm_flags(file, prot, flags, capabilities); /* we're going to need to record the mapping */ region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index dff991e06..1ecc0bcae 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -196,27 +196,26 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, * Determine the type of allocation constraint. */ #ifdef CONFIG_NUMA -static enum oom_constraint constrained_alloc(struct zonelist *zonelist, - gfp_t gfp_mask, nodemask_t *nodemask, - unsigned long *totalpages) +static enum oom_constraint constrained_alloc(struct oom_control *oc, + unsigned long *totalpages) { struct zone *zone; struct zoneref *z; - enum zone_type high_zoneidx = gfp_zone(gfp_mask); + enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); bool cpuset_limited = false; int nid; /* Default to all available memory */ *totalpages = totalram_pages + total_swap_pages; - if (!zonelist) + if (!oc->zonelist) return CONSTRAINT_NONE; /* * Reach here only when __GFP_NOFAIL is used. So, we should avoid * to kill current.We have to random task kill in this case. * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. */ - if (gfp_mask & __GFP_THISNODE) + if (oc->gfp_mask & __GFP_THISNODE) return CONSTRAINT_NONE; /* @@ -224,17 +223,18 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist, * the page allocator means a mempolicy is in effect. Cpuset policy * is enforced in get_page_from_freelist(). 
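The oom_kill.c hunks stop passing zonelist/gfp_mask/order/nodemask (plus the force_kill flag) individually and instead thread a single struct oom_control through constrained_alloc(), select_bad_process(), oom_kill_process() and out_of_memory(). The structure itself is defined in a header outside this diff; the fields used here and the initializers in pagefault_out_of_memory() and __alloc_pages_may_oom() imply roughly:

	/* sketch inferred from the call sites in this patch */
	struct oom_control {
		struct zonelist *zonelist;	/* may be NULL, e.g. on the page-fault path */
		nodemask_t *nodemask;		/* allocation constraint, or NULL */
		gfp_t gfp_mask;			/* allocation that triggered the OOM */
		int order;			/* allocation order; -1 marks a forced OOM kill */
	};

order == -1 takes over the role of the old force_kill argument: with it set, check_panic_on_oom() will not panic, an existing TIF_MEMDIE victim does not abort the scan, and finding no killable task is not treated as fatal.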
*/ - if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) { + if (oc->nodemask && + !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { *totalpages = total_swap_pages; - for_each_node_mask(nid, *nodemask) + for_each_node_mask(nid, *oc->nodemask) *totalpages += node_spanned_pages(nid); return CONSTRAINT_MEMORY_POLICY; } /* Check this allocation failure is caused by cpuset's wall function */ - for_each_zone_zonelist_nodemask(zone, z, zonelist, - high_zoneidx, nodemask) - if (!cpuset_zone_allowed(zone, gfp_mask)) + for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, + high_zoneidx, oc->nodemask) + if (!cpuset_zone_allowed(zone, oc->gfp_mask)) cpuset_limited = true; if (cpuset_limited) { @@ -246,20 +246,18 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist, return CONSTRAINT_NONE; } #else -static enum oom_constraint constrained_alloc(struct zonelist *zonelist, - gfp_t gfp_mask, nodemask_t *nodemask, - unsigned long *totalpages) +static enum oom_constraint constrained_alloc(struct oom_control *oc, + unsigned long *totalpages) { *totalpages = totalram_pages + total_swap_pages; return CONSTRAINT_NONE; } #endif -enum oom_scan_t oom_scan_process_thread(struct task_struct *task, - unsigned long totalpages, const nodemask_t *nodemask, - bool force_kill) +enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, + struct task_struct *task, unsigned long totalpages) { - if (oom_unkillable_task(task, NULL, nodemask)) + if (oom_unkillable_task(task, NULL, oc->nodemask)) return OOM_SCAN_CONTINUE; /* @@ -267,7 +265,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task, * Don't allow any other task to have access to the reserves. */ if (test_tsk_thread_flag(task, TIF_MEMDIE)) { - if (!force_kill) + if (oc->order != -1) return OOM_SCAN_ABORT; } if (!task->mm) @@ -280,7 +278,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task, if (oom_task_origin(task)) return OOM_SCAN_SELECT; - if (task_will_free_mem(task) && !force_kill) + if (task_will_free_mem(task) && oc->order != -1) return OOM_SCAN_ABORT; return OOM_SCAN_OK; @@ -289,12 +287,9 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task, /* * Simple selection loop. We chose the process with the highest * number of 'points'. Returns -1 on scan abort. 
- * - * (not docbooked, we don't want this one cluttering up the manual) */ -static struct task_struct *select_bad_process(unsigned int *ppoints, - unsigned long totalpages, const nodemask_t *nodemask, - bool force_kill) +static struct task_struct *select_bad_process(struct oom_control *oc, + unsigned int *ppoints, unsigned long totalpages) { struct task_struct *g, *p; struct task_struct *chosen = NULL; @@ -304,8 +299,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, for_each_process_thread(g, p) { unsigned int points; - switch (oom_scan_process_thread(p, totalpages, nodemask, - force_kill)) { + switch (oom_scan_process_thread(oc, p, totalpages)) { case OOM_SCAN_SELECT: chosen = p; chosen_points = ULONG_MAX; @@ -318,7 +312,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, case OOM_SCAN_OK: break; }; - points = oom_badness(p, NULL, nodemask, totalpages); + points = oom_badness(p, NULL, oc->nodemask, totalpages); if (!points || points < chosen_points) continue; /* Prefer thread group leaders for display purposes */ @@ -380,13 +374,13 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) rcu_read_unlock(); } -static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, - struct mem_cgroup *memcg, const nodemask_t *nodemask) +static void dump_header(struct oom_control *oc, struct task_struct *p, + struct mem_cgroup *memcg) { task_lock(current); pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " "oom_score_adj=%hd\n", - current->comm, gfp_mask, order, + current->comm, oc->gfp_mask, oc->order, current->signal->oom_score_adj); cpuset_print_task_mems_allowed(current); task_unlock(current); @@ -396,7 +390,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, else show_mem(SHOW_MEM_FILTER_NODES); if (sysctl_oom_dump_tasks) - dump_tasks(memcg, nodemask); + dump_tasks(memcg, oc->nodemask); } /* @@ -487,10 +481,9 @@ void oom_killer_enable(void) * Must be called while holding a reference to p, which will be released upon * returning. */ -void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, +void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, nodemask_t *nodemask, - const char *message) + struct mem_cgroup *memcg, const char *message) { struct task_struct *victim = p; struct task_struct *child; @@ -514,7 +507,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, task_unlock(p); if (__ratelimit(&oom_rs)) - dump_header(p, gfp_mask, order, memcg, nodemask); + dump_header(oc, p, memcg); task_lock(p); pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", @@ -537,7 +530,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, /* * oom_badness() returns 0 if the thread is unkillable */ - child_points = oom_badness(child, memcg, nodemask, + child_points = oom_badness(child, memcg, oc->nodemask, totalpages); if (child_points > victim_points) { put_task_struct(victim); @@ -600,8 +593,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. 
*/ -void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, - int order, const nodemask_t *nodemask, +void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint, struct mem_cgroup *memcg) { if (likely(!sysctl_panic_on_oom)) @@ -615,7 +607,10 @@ void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, if (constraint != CONSTRAINT_NONE) return; } - dump_header(NULL, gfp_mask, order, memcg, nodemask); + /* Do not panic for oom kills triggered by sysrq */ + if (oc->order == -1) + return; + dump_header(oc, NULL, memcg); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); } @@ -635,28 +630,21 @@ int unregister_oom_notifier(struct notifier_block *nb) EXPORT_SYMBOL_GPL(unregister_oom_notifier); /** - * __out_of_memory - kill the "best" process when we run out of memory - * @zonelist: zonelist pointer - * @gfp_mask: memory allocation flags - * @order: amount of memory being requested as a power of 2 - * @nodemask: nodemask passed to page allocator - * @force_kill: true if a task must be killed, even if others are exiting + * out_of_memory - kill the "best" process when we run out of memory + * @oc: pointer to struct oom_control * * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. */ -bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, - int order, nodemask_t *nodemask, bool force_kill) +bool out_of_memory(struct oom_control *oc) { - const nodemask_t *mpol_mask; struct task_struct *p; unsigned long totalpages; unsigned long freed = 0; unsigned int uninitialized_var(points); enum oom_constraint constraint = CONSTRAINT_NONE; - int killed = 0; if (oom_killer_disabled) return false; @@ -664,7 +652,7 @@ bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ - goto out; + return true; /* * If current has a pending SIGKILL or is exiting, then automatically @@ -677,47 +665,42 @@ bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, if (current->mm && (fatal_signal_pending(current) || task_will_free_mem(current))) { mark_oom_victim(current); - goto out; + return true; } /* * Check if there were limitations on the allocation (only relevant for * NUMA) that may require different handling. */ - constraint = constrained_alloc(zonelist, gfp_mask, nodemask, - &totalpages); - mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL; - check_panic_on_oom(constraint, gfp_mask, order, mpol_mask, NULL); + constraint = constrained_alloc(oc, &totalpages); + if (constraint != CONSTRAINT_MEMORY_POLICY) + oc->nodemask = NULL; + check_panic_on_oom(oc, constraint, NULL); if (sysctl_oom_kill_allocating_task && current->mm && - !oom_unkillable_task(current, NULL, nodemask) && + !oom_unkillable_task(current, NULL, oc->nodemask) && current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { get_task_struct(current); - oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL, - nodemask, + oom_kill_process(oc, current, 0, totalpages, NULL, "Out of memory (oom_kill_allocating_task)"); - goto out; + return true; } - p = select_bad_process(&points, totalpages, mpol_mask, force_kill); + p = select_bad_process(oc, &points, totalpages); /* Found nothing?!?! 
Either we hang forever, or we panic. */ - if (!p) { - dump_header(NULL, gfp_mask, order, NULL, mpol_mask); + if (!p && oc->order != -1) { + dump_header(oc, NULL, NULL); panic("Out of memory and no killable processes...\n"); } - if (p != (void *)-1UL) { - oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, - nodemask, "Out of memory"); - killed = 1; - } -out: - /* - * Give the killed threads a good chance of exiting before trying to - * allocate memory again. - */ - if (killed) + if (p && p != (void *)-1UL) { + oom_kill_process(oc, p, points, totalpages, NULL, + "Out of memory"); + /* + * Give the killed process a good chance to exit before trying + * to allocate memory again. + */ schedule_timeout_killable(1); - + } return true; } @@ -728,13 +711,20 @@ out: */ void pagefault_out_of_memory(void) { + struct oom_control oc = { + .zonelist = NULL, + .nodemask = NULL, + .gfp_mask = 0, + .order = 0, + }; + if (mem_cgroup_oom_synchronize(true)) return; if (!mutex_trylock(&oom_lock)) return; - if (!out_of_memory(NULL, 0, 0, NULL, false)) { + if (!out_of_memory(&oc)) { /* * There shouldn't be any user tasks runnable while the * OOM killer is disabled, so the current task has to diff --git a/mm/page-writeback.c b/mm/page-writeback.c index db0ff666a..c4daa9f0c 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -161,9 +161,6 @@ struct dirty_throttle_control { unsigned long pos_ratio; }; -#define DTC_INIT_COMMON(__wb) .wb = (__wb), \ - .wb_completions = &(__wb)->completions - /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will @@ -173,12 +170,16 @@ struct dirty_throttle_control { #ifdef CONFIG_CGROUP_WRITEBACK -#define GDTC_INIT(__wb) .dom = &global_wb_domain, \ - DTC_INIT_COMMON(__wb) +#define GDTC_INIT(__wb) .wb = (__wb), \ + .dom = &global_wb_domain, \ + .wb_completions = &(__wb)->completions + #define GDTC_INIT_NO_WB .dom = &global_wb_domain -#define MDTC_INIT(__wb, __gdtc) .dom = mem_cgroup_wb_domain(__wb), \ - .gdtc = __gdtc, \ - DTC_INIT_COMMON(__wb) + +#define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \ + .dom = mem_cgroup_wb_domain(__wb), \ + .wb_completions = &(__wb)->memcg_completions, \ + .gdtc = __gdtc static bool mdtc_valid(struct dirty_throttle_control *dtc) { @@ -229,7 +230,8 @@ static void wb_min_max_ratio(struct bdi_writeback *wb, #else /* CONFIG_CGROUP_WRITEBACK */ -#define GDTC_INIT(__wb) DTC_INIT_COMMON(__wb) +#define GDTC_INIT(__wb) .wb = (__wb), \ + .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB #define MDTC_INIT(__wb, __gdtc) @@ -698,13 +700,19 @@ static unsigned long hard_dirty_limit(struct wb_domain *dom, return max(thresh, dom->dirty_limit); } -/* memory available to a memcg domain is capped by system-wide clean memory */ -static void mdtc_cap_avail(struct dirty_throttle_control *mdtc) +/* + * Memory which can be further allocated to a memcg domain is capped by + * system-wide clean memory excluding the amount being used in the domain. 
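The replacement helper computes the memcg domain's dirtyable memory (the base its dirty thresholds are derived from) as the domain's own file pages plus whatever it could still allocate, with the latter capped both by the cgroup's headroom and by the clean memory available outside the domain (other_clean = global_clean - clean). For example, with filepages = 100 pages of which 20 are dirty, headroom = 300, and a global domain of avail = 1000 with dirty = 400: clean = 80, global_clean = 600, other_clean = 520, so the memcg domain's avail becomes 100 + min(300, 520) = 400 pages.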
+ */ +static void mdtc_calc_avail(struct dirty_throttle_control *mdtc, + unsigned long filepages, unsigned long headroom) { struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); - unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); + unsigned long clean = filepages - min(filepages, mdtc->dirty); + unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); + unsigned long other_clean = global_clean - min(global_clean, clean); - mdtc->avail = min(mdtc->avail, clean); + mdtc->avail = filepages + min(headroom, other_clean); } /** @@ -1305,7 +1313,7 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc, wb->dirty_ratelimit = max(dirty_ratelimit, 1UL); wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; - trace_bdi_dirty_ratelimit(wb->bdi, dirty_rate, task_ratelimit); + trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit); } static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc, @@ -1578,16 +1586,16 @@ static void balance_dirty_pages(struct address_space *mapping, } if (mdtc) { - unsigned long writeback; + unsigned long filepages, headroom, writeback; /* * If @wb belongs to !root memcg, repeat the same * basic calculations for the memcg domain. */ - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, - &writeback); - mdtc_cap_avail(mdtc); + mem_cgroup_wb_stats(wb, &filepages, &headroom, + &mdtc->dirty, &writeback); mdtc->dirty += writeback; + mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); @@ -1699,7 +1707,7 @@ static void balance_dirty_pages(struct address_space *mapping, * do a reset, as it may be a light dirtier. */ if (pause < min_pause) { - trace_balance_dirty_pages(bdi, + trace_balance_dirty_pages(wb, sdtc->thresh, sdtc->bg_thresh, sdtc->dirty, @@ -1728,7 +1736,7 @@ static void balance_dirty_pages(struct address_space *mapping, } pause: - trace_balance_dirty_pages(bdi, + trace_balance_dirty_pages(wb, sdtc->thresh, sdtc->bg_thresh, sdtc->dirty, @@ -1909,10 +1917,11 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) return true; if (mdtc) { - unsigned long writeback; + unsigned long filepages, headroom, writeback; - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback); - mdtc_cap_avail(mdtc); + mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, + &writeback); + mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); /* ditto, ignore writeback */ if (mdtc->dirty > mdtc->bg_thresh) @@ -1972,7 +1981,6 @@ void laptop_mode_timer_fn(unsigned long data) int nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); struct bdi_writeback *wb; - struct wb_iter iter; /* * We want to write everything out, not just down to the dirty @@ -1981,10 +1989,12 @@ void laptop_mode_timer_fn(unsigned long data) if (!bdi_has_dirty_io(&q->backing_dev_info)) return; - bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) + rcu_read_lock(); + list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node) if (wb_has_dirty_io(wb)) wb_start_writeback(wb, nr_pages, true, WB_REASON_LAPTOP_TIMER); + rcu_read_unlock(); } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3a167b135..23651fd84 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -126,6 +126,24 @@ unsigned long dirty_balance_reserve __read_mostly; int percpu_pagelist_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; +/* + * A cached value of the page's pageblock's migratetype, used when the page is + * put on a pcplist. 
Used to avoid the pageblock migratetype lookup when + * freeing from pcplists in most cases, at the cost of possibly becoming stale. + * Also the migratetype set in the page does not necessarily match the pcplist + * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any + * other index - this ensures that it will be put on the correct CMA freelist. + */ +static inline int get_pcppage_migratetype(struct page *page) +{ + return page->index; +} + +static inline void set_pcppage_migratetype(struct page *page, int migratetype) +{ + page->index = migratetype; +} + #ifdef CONFIG_PM_SLEEP /* * The following functions are used by the suspend/hibernate code to temporarily @@ -207,6 +225,9 @@ static char * const zone_names[MAX_NR_ZONES] = { "HighMem", #endif "Movable", +#ifdef CONFIG_ZONE_DEVICE + "Device", +#endif }; int min_free_kbytes = 1024; @@ -795,7 +816,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, page = list_entry(list->prev, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - mt = get_freepage_migratetype(page); + + mt = get_pcppage_migratetype(page); + /* MIGRATE_ISOLATE page should not go to pcplists */ + VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); + /* Pageblock could have been isolated meanwhile */ if (unlikely(has_isolate_pageblock(zone))) mt = get_pageblock_migratetype(page); @@ -959,7 +984,6 @@ static void __free_pages_ok(struct page *page, unsigned int order) migratetype = get_pfnblock_migratetype(page, pfn); local_irq_save(flags); __count_vm_events(PGFREE, 1 << order); - set_freepage_migratetype(page, migratetype); free_one_page(page_zone(page), page, pfn, order, migratetype); local_irq_restore(flags); } @@ -1392,7 +1416,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); - set_freepage_migratetype(page, migratetype); + set_pcppage_migratetype(page, migratetype); return page; } @@ -1469,7 +1493,6 @@ int move_freepages(struct zone *zone, order = page_order(page); list_move(&page->lru, &zone->free_area[order].free_list[migratetype]); - set_freepage_migratetype(page, migratetype); page += 1 << order; pages_moved += 1 << order; } @@ -1639,14 +1662,13 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) expand(zone, page, order, current_order, area, start_migratetype); /* - * The freepage_migratetype may differ from pageblock's + * The pcppage_migratetype may differ from pageblock's * migratetype depending on the decisions in - * try_to_steal_freepages(). This is OK as long as it - * does not differ for MIGRATE_CMA pageblocks. For CMA - * we need to make sure unallocated pages flushed from - * pcp lists are returned to the correct freelist. + * find_suitable_fallback(). This is OK as long as it does not + * differ for MIGRATE_CMA pageblocks. 
Those can be used as + * fallback only via special __rmqueue_cma_fallback() function */ - set_freepage_migratetype(page, start_migratetype); + set_pcppage_migratetype(page, start_migratetype); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); @@ -1722,7 +1744,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, else list_add_tail(&page->lru, list); list = &page->lru; - if (is_migrate_cma(get_freepage_migratetype(page))) + if (is_migrate_cma(get_pcppage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); } @@ -1919,7 +1941,7 @@ void free_hot_cold_page(struct page *page, bool cold) return; migratetype = get_pfnblock_migratetype(page, pfn); - set_freepage_migratetype(page, migratetype); + set_pcppage_migratetype(page, migratetype); local_irq_save(flags); __count_vm_event(PGFREE); @@ -2124,7 +2146,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (!page) goto failed; __mod_zone_freepage_state(zone, -(1 << order), - get_freepage_migratetype(page)); + get_pcppage_migratetype(page)); } __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); @@ -2705,6 +2727,12 @@ static inline struct page * __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress) { + struct oom_control oc = { + .zonelist = ac->zonelist, + .nodemask = ac->nodemask, + .gfp_mask = gfp_mask, + .order = order, + }; struct page *page; *did_some_progress = 0; @@ -2756,8 +2784,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, goto out; } /* Exhausted what can be done so it's blamo time */ - if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) - || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) + if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) *did_some_progress = 1; out: mutex_unlock(&oom_lock); @@ -3499,8 +3526,6 @@ EXPORT_SYMBOL(alloc_pages_exact); * * Like alloc_pages_exact(), but try to allocate on node nid first before falling * back. - * Note this is not alloc_pages_exact_node() which allocates on a specific node, - * but is not exact. */ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) { @@ -5075,7 +5100,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid, { unsigned long zone_start_pfn, zone_end_pfn; - /* When hotadd a new node, the node should be empty */ + /* When hotadd a new node from cpu_up(), the node should be empty */ if (!node_start_pfn && !node_end_pfn) return 0; @@ -5142,7 +5167,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; unsigned long zone_start_pfn, zone_end_pfn; - /* When hotadd a new node, the node should be empty */ + /* When hotadd a new node from cpu_up(), the node should be empty */ if (!node_start_pfn && !node_end_pfn) return 0; @@ -5315,8 +5340,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, * * NOTE: pgdat should get zeroed by caller. 
*/ -static void __paginginit free_area_init_core(struct pglist_data *pgdat, - unsigned long node_start_pfn, unsigned long node_end_pfn) +static void __paginginit free_area_init_core(struct pglist_data *pgdat) { enum zone_type j; int nid = pgdat->node_id; @@ -5467,7 +5491,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, - (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1); + (u64)start_pfn << PAGE_SHIFT, + end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); #endif calculate_node_totalpages(pgdat, start_pfn, end_pfn, zones_size, zholes_size); @@ -5479,7 +5504,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, (unsigned long)pgdat->node_mem_map); #endif - free_area_init_core(pgdat, start_pfn, end_pfn); + free_area_init_core(pgdat); } #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP @@ -5490,11 +5515,9 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, */ void __init setup_nr_node_ids(void) { - unsigned int node; - unsigned int highest = 0; + unsigned int highest; - for_each_node_mask(node, node_possible_map) - highest = node; + highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); nr_node_ids = highest + 1; } #endif @@ -6015,7 +6038,7 @@ void __init mem_init_print_info(const char *str) * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved * - * The per-cpu batchsize and zone watermarks are determined by present_pages. + * The per-cpu batchsize and zone watermarks are determined by managed_pages. * In the DMA zone, a significant percentage may be consumed by kernel image * and other unfreeable allocations which can skew the watermarks badly. This * function may optionally be used to account for unfreeable pages in the @@ -6068,7 +6091,7 @@ void __init page_alloc_init(void) } /* - * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio + * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio * or min_free_kbytes changes. */ static void calculate_totalreserve_pages(void) @@ -6112,7 +6135,7 @@ static void calculate_totalreserve_pages(void) /* * setup_per_zone_lowmem_reserve - called whenever - * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone + * sysctl_lowmem_reserve_ratio changes. Ensures that each zone * has a correct pages reserved value, so an adequate number of * pages are left in the zone after a successful __alloc_pages(). 
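setup_nr_node_ids() above now takes the highest possible node id from find_last_bit() instead of walking the whole mask. A tiny userspace analog (linear scan, names invented) of the same computation:

#include <stdio.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))

/* return the index of the highest set bit, or nbits if none are set */
static unsigned int demo_find_last_bit(const unsigned long *map,
				       unsigned int nbits)
{
	unsigned int i, last = nbits;

	for (i = 0; i < nbits; i++)
		if (map[i / DEMO_BITS_PER_LONG] &
		    (1UL << (i % DEMO_BITS_PER_LONG)))
			last = i;
	return last;
}

int main(void)
{
	unsigned long possible_map[1] = { 0x13 };	/* nodes 0, 1 and 4 possible */

	printf("nr_node_ids = %u\n",
	       demo_find_last_bit(possible_map, DEMO_BITS_PER_LONG) + 1);
	return 0;
}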
*/ diff --git a/mm/page_ext.c b/mm/page_ext.c index d86fd2f53..292ca7b8d 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -6,6 +6,7 @@ #include <linux/vmalloc.h> #include <linux/kmemleak.h> #include <linux/page_owner.h> +#include <linux/page_idle.h> /* * struct page extension @@ -59,6 +60,9 @@ static struct page_ext_operations *page_ext_ops[] = { #ifdef CONFIG_PAGE_OWNER &page_owner_ops, #endif +#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) + &page_idle_ops, +#endif }; static unsigned long total_usage; diff --git a/mm/page_idle.c b/mm/page_idle.c new file mode 100644 index 000000000..d5dd79041 --- /dev/null +++ b/mm/page_idle.c @@ -0,0 +1,232 @@ +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/fs.h> +#include <linux/sysfs.h> +#include <linux/kobject.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/pagemap.h> +#include <linux/rmap.h> +#include <linux/mmu_notifier.h> +#include <linux/page_ext.h> +#include <linux/page_idle.h> + +#define BITMAP_CHUNK_SIZE sizeof(u64) +#define BITMAP_CHUNK_BITS (BITMAP_CHUNK_SIZE * BITS_PER_BYTE) + +/* + * Idle page tracking only considers user memory pages, for other types of + * pages the idle flag is always unset and an attempt to set it is silently + * ignored. + * + * We treat a page as a user memory page if it is on an LRU list, because it is + * always safe to pass such a page to rmap_walk(), which is essential for idle + * page tracking. With such an indicator of user pages we can skip isolated + * pages, but since there are not usually many of them, it will hardly affect + * the overall result. + * + * This function tries to get a user memory page by pfn as described above. + */ +static struct page *page_idle_get_page(unsigned long pfn) +{ + struct page *page; + struct zone *zone; + + if (!pfn_valid(pfn)) + return NULL; + + page = pfn_to_page(pfn); + if (!page || !PageLRU(page) || + !get_page_unless_zero(page)) + return NULL; + + zone = page_zone(page); + spin_lock_irq(&zone->lru_lock); + if (unlikely(!PageLRU(page))) { + put_page(page); + page = NULL; + } + spin_unlock_irq(&zone->lru_lock); + return page; +} + +static int page_idle_clear_pte_refs_one(struct page *page, + struct vm_area_struct *vma, + unsigned long addr, void *arg) +{ + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + pmd_t *pmd; + pte_t *pte; + bool referenced = false; + + if (unlikely(PageTransHuge(page))) { + pmd = page_check_address_pmd(page, mm, addr, + PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl); + if (pmd) { + referenced = pmdp_clear_young_notify(vma, addr, pmd); + spin_unlock(ptl); + } + } else { + pte = page_check_address(page, mm, addr, &ptl, 0); + if (pte) { + referenced = ptep_clear_young_notify(vma, addr, pte); + pte_unmap_unlock(pte, ptl); + } + } + if (referenced) { + clear_page_idle(page); + /* + * We cleared the referenced bit in a mapping to this page. To + * avoid interference with page reclaim, mark it young so that + * page_referenced() will return > 0. + */ + set_page_young(page); + } + return SWAP_AGAIN; +} + +static void page_idle_clear_pte_refs(struct page *page) +{ + /* + * Since rwc.arg is unused, rwc is effectively immutable, so we + * can make it static const to save some cycles and stack. 
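Looking ahead to the bitmap read/write handlers added just below, the resulting file is /sys/kernel/mm/page_idle/bitmap: one bit per PFN, accessed in 8-byte chunks, readable and writable by root when CONFIG_IDLE_PAGE_TRACKING is set. A minimal userspace sketch of driving it (error handling trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t chunk = ~(uint64_t)0;	/* one 64-bit chunk = 64 page frames */
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* mark the first 64 PFNs idle */
	if (pwrite(fd, &chunk, sizeof(chunk), 0) != (ssize_t)sizeof(chunk))
		perror("pwrite");
	/* ... let the workload run for a while, then re-read: bits that are
	 * still set belong to pages nobody touched in the meantime */
	if (pread(fd, &chunk, sizeof(chunk), 0) == (ssize_t)sizeof(chunk))
		printf("idle mask for PFNs 0-63: %#llx\n",
		       (unsigned long long)chunk);
	close(fd);
	return 0;
}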
+ */ + static const struct rmap_walk_control rwc = { + .rmap_one = page_idle_clear_pte_refs_one, + .anon_lock = page_lock_anon_vma_read, + }; + bool need_lock; + + if (!page_mapped(page) || + !page_rmapping(page)) + return; + + need_lock = !PageAnon(page) || PageKsm(page); + if (need_lock && !trylock_page(page)) + return; + + rmap_walk(page, (struct rmap_walk_control *)&rwc); + + if (need_lock) + unlock_page(page); +} + +static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t count) +{ + u64 *out = (u64 *)buf; + struct page *page; + unsigned long pfn, end_pfn; + int bit; + + if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) + return -EINVAL; + + pfn = pos * BITS_PER_BYTE; + if (pfn >= max_pfn) + return 0; + + end_pfn = pfn + count * BITS_PER_BYTE; + if (end_pfn > max_pfn) + end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS); + + for (; pfn < end_pfn; pfn++) { + bit = pfn % BITMAP_CHUNK_BITS; + if (!bit) + *out = 0ULL; + page = page_idle_get_page(pfn); + if (page) { + if (page_is_idle(page)) { + /* + * The page might have been referenced via a + * pte, in which case it is not idle. Clear + * refs and recheck. + */ + page_idle_clear_pte_refs(page); + if (page_is_idle(page)) + *out |= 1ULL << bit; + } + put_page(page); + } + if (bit == BITMAP_CHUNK_BITS - 1) + out++; + cond_resched(); + } + return (char *)out - buf; +} + +static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t count) +{ + const u64 *in = (u64 *)buf; + struct page *page; + unsigned long pfn, end_pfn; + int bit; + + if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) + return -EINVAL; + + pfn = pos * BITS_PER_BYTE; + if (pfn >= max_pfn) + return -ENXIO; + + end_pfn = pfn + count * BITS_PER_BYTE; + if (end_pfn > max_pfn) + end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS); + + for (; pfn < end_pfn; pfn++) { + bit = pfn % BITMAP_CHUNK_BITS; + if ((*in >> bit) & 1) { + page = page_idle_get_page(pfn); + if (page) { + page_idle_clear_pte_refs(page); + set_page_idle(page); + put_page(page); + } + } + if (bit == BITMAP_CHUNK_BITS - 1) + in++; + cond_resched(); + } + return (char *)in - buf; +} + +static struct bin_attribute page_idle_bitmap_attr = + __BIN_ATTR(bitmap, S_IRUSR | S_IWUSR, + page_idle_bitmap_read, page_idle_bitmap_write, 0); + +static struct bin_attribute *page_idle_bin_attrs[] = { + &page_idle_bitmap_attr, + NULL, +}; + +static struct attribute_group page_idle_attr_group = { + .bin_attrs = page_idle_bin_attrs, + .name = "page_idle", +}; + +#ifndef CONFIG_64BIT +static bool need_page_idle(void) +{ + return true; +} +struct page_ext_operations page_idle_ops = { + .need = need_page_idle, +}; +#endif + +static int __init page_idle_init(void) +{ + int err; + + err = sysfs_create_group(mm_kobj, &page_idle_attr_group); + if (err) { + pr_err("page_idle: register sysfs failed\n"); + return err; + } + return 0; +} +subsys_initcall(page_idle_init); diff --git a/mm/page_io.c b/mm/page_io.c index 520baa4b0..b995a5ba5 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -33,22 +33,19 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, if (bio) { bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; - bio->bi_io_vec[0].bv_page = page; - bio->bi_io_vec[0].bv_len = PAGE_SIZE; - bio->bi_io_vec[0].bv_offset = 0; - bio->bi_vcnt = 1; - bio->bi_iter.bi_size = PAGE_SIZE; bio->bi_end_io = end_io; + + bio_add_page(bio, page, 
PAGE_SIZE, 0); + BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE); } return bio; } -void end_swap_bio_write(struct bio *bio, int err) +void end_swap_bio_write(struct bio *bio) { - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct page *page = bio->bi_io_vec[0].bv_page; - if (!uptodate) { + if (bio->bi_error) { SetPageError(page); /* * We failed to write the page out to swap-space. @@ -69,12 +66,11 @@ void end_swap_bio_write(struct bio *bio, int err) bio_put(bio); } -static void end_swap_bio_read(struct bio *bio, int err) +static void end_swap_bio_read(struct bio *bio) { - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct page *page = bio->bi_io_vec[0].bv_page; - if (!uptodate) { + if (bio->bi_error) { SetPageError(page); ClearPageUptodate(page); printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", @@ -254,7 +250,7 @@ static sector_t swap_page_sector(struct page *page) } int __swap_writepage(struct page *page, struct writeback_control *wbc, - void (*end_write_func)(struct bio *, int)) + bio_end_io_t end_write_func) { struct bio *bio; int ret, rw = WRITE; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 303c90879..4568fd58f 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -9,7 +9,8 @@ #include <linux/hugetlb.h> #include "internal.h" -int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages) +static int set_migratetype_isolate(struct page *page, + bool skip_hwpoisoned_pages) { struct zone *zone; unsigned long flags, pfn; @@ -72,7 +73,7 @@ out: return ret; } -void unset_migratetype_isolate(struct page *page, unsigned migratetype) +static void unset_migratetype_isolate(struct page *page, unsigned migratetype) { struct zone *zone; unsigned long flags, nr_pages; @@ -223,34 +224,16 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, continue; } page = pfn_to_page(pfn); - if (PageBuddy(page)) { + if (PageBuddy(page)) /* - * If race between isolatation and allocation happens, - * some free pages could be in MIGRATE_MOVABLE list - * although pageblock's migratation type of the page - * is MIGRATE_ISOLATE. Catch it and move the page into - * MIGRATE_ISOLATE list. + * If the page is on a free list, it has to be on + * the correct MIGRATE_ISOLATE freelist. There is no + * simple way to verify that as VM_BUG_ON(), though. */ - if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) { - struct page *end_page; - - end_page = page + (1 << page_order(page)) - 1; - move_freepages(page_zone(page), page, end_page, - MIGRATE_ISOLATE); - } pfn += 1 << page_order(page); - } - else if (page_count(page) == 0 && - get_freepage_migratetype(page) == MIGRATE_ISOLATE) - pfn += 1; - else if (skip_hwpoisoned_pages && PageHWPoison(page)) { - /* - * The HWPoisoned page may be not in buddy - * system, and page_count() is not 0. 
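The page_io.c hunks above move to the block layer's newer completion convention, where the end_io callback takes only the bio and the status is read from ->bi_error. A standalone toy analog of that convention, with all types invented for illustration:

#include <stdio.h>

struct demo_bio {
	int bi_error;				/* status lives in the object */
	void (*bi_end_io)(struct demo_bio *bio);
};

static void demo_end_swap_write(struct demo_bio *bio)
{
	if (bio->bi_error)
		fprintf(stderr, "swap write failed: %d\n", bio->bi_error);
	else
		puts("swap write completed");
}

int main(void)
{
	struct demo_bio bio = { .bi_error = -5, .bi_end_io = demo_end_swap_write };

	bio.bi_end_io(&bio);	/* what the block layer would do on completion */
	return 0;
}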
- */ + else if (skip_hwpoisoned_pages && PageHWPoison(page)) + /* A HWPoisoned page cannot be also PageBuddy */ pfn++; - continue; - } else break; } diff --git a/mm/percpu.c b/mm/percpu.c index b4fe24569..8e67995a5 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1669,9 +1669,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, schunk->map[1] = ai->static_size; schunk->map_used = 1; if (schunk->free_size) - schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size); - else - schunk->map[1] |= 1; + schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size; + schunk->map[schunk->map_used] |= 1; /* init dynamic chunk if necessary */ if (dyn_size) { diff --git a/mm/readahead.c b/mm/readahead.c index 60cd846a9..24682f6f4 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, while (!list_empty(pages)) { page = list_to_page(pages); list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + if (add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) { read_cache_pages_invalidate_page(mapping, page); continue; } @@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp, for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_to_page(pages); list_del(&page->lru); - if (!add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + if (!add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) { mapping->a_ops->readpage(filp, page); } page_cache_release(page); @@ -59,9 +59,12 @@ #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/backing-dev.h> +#include <linux/page_idle.h> #include <asm/tlbflush.h> +#include <trace/events/tlb.h> + #include "internal.h" static struct kmem_cache *anon_vma_cachep; @@ -583,6 +586,107 @@ vma_address(struct page *page, struct vm_area_struct *vma) return address; } +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH +static void percpu_flush_tlb_batch_pages(void *data) +{ + /* + * All TLB entries are flushed on the assumption that it is + * cheaper to flush all TLBs and let them be refilled than + * flushing individual PFNs. Note that we do not track mm's + * to flush as that might simply be multiple full TLB flushes + * for no gain. + */ + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); + flush_tlb_local(); +} + +/* + * Flush TLB entries for recently unmapped pages from remote CPUs. It is + * important if a PTE was dirty when it was unmapped that it's flushed + * before any IO is initiated on the page to prevent lost writes. Similarly, + * it must be flushed before freeing to prevent data leakage. 
+ */ +void try_to_unmap_flush(void) +{ + struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; + int cpu; + + if (!tlb_ubc->flush_required) + return; + + cpu = get_cpu(); + + trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL); + + if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) + percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask); + + if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) { + smp_call_function_many(&tlb_ubc->cpumask, + percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true); + } + cpumask_clear(&tlb_ubc->cpumask); + tlb_ubc->flush_required = false; + tlb_ubc->writable = false; + put_cpu(); +} + +/* Flush iff there are potentially writable TLB entries that can race with IO */ +void try_to_unmap_flush_dirty(void) +{ + struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; + + if (tlb_ubc->writable) + try_to_unmap_flush(); +} + +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, + struct page *page, bool writable) +{ + struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; + + cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm)); + tlb_ubc->flush_required = true; + + /* + * If the PTE was dirty then it's best to assume it's writable. The + * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() + * before the page is queued for IO. + */ + if (writable) + tlb_ubc->writable = true; +} + +/* + * Returns true if the TLB flush should be deferred to the end of a batch of + * unmap operations to reduce IPIs. + */ +static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) +{ + bool should_defer = false; + + if (!(flags & TTU_BATCH_FLUSH)) + return false; + + /* If remote CPUs need to be flushed then defer batch the flush */ + if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) + should_defer = true; + put_cpu(); + + return should_defer; +} +#else +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, + struct page *page, bool writable) +{ +} + +static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) +{ + return false; +} +#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ + /* * At what user virtual address is page expected in vma? * Caller should check the page is actually part of the vma. @@ -783,6 +887,11 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma, pte_unmap_unlock(pte, ptl); } + if (referenced) + clear_page_idle(page); + if (test_and_clear_page_young(page)) + referenced++; + if (referenced) { pra->referenced++; pra->vm_flags |= vma->vm_flags; @@ -1220,7 +1329,20 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* Nuke the page table entry. */ flush_cache_page(vma, address, page_to_pfn(page)); - pteval = ptep_clear_flush(vma, address, pte); + if (should_defer_flush(mm, flags)) { + /* + * We clear the PTE but do not flush so potentially a remote + * CPU could still be writing to the page. If the entry was + * previously clean then the architecture must guarantee that + * a clear->dirty transition on a cached TLB entry is written + * through and traps if the PTE is unmapped. + */ + pteval = ptep_get_and_clear(mm, address, pte); + + set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval)); + } else { + pteval = ptep_clear_flush(vma, address, pte); + } /* Move the dirty bit to the physical page now the pte is gone. 
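The rmap.c additions above defer the TLB flush for a whole batch of unmapped pages and issue it once, flushing early only when a dirty (writable) PTE could race with IO. A compressed, purely illustrative userspace model of that bookkeeping; the real code tracks a cpumask and sends IPIs:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_NR_CPUS	8

/* illustrative stand-in for struct tlbflush_unmap_batch */
struct demo_flush_batch {
	bool cpu_needs_flush[DEMO_NR_CPUS];
	bool flush_required;
	bool writable;
};

static void demo_queue_flush(struct demo_flush_batch *b, int cpu, bool writable)
{
	b->cpu_needs_flush[cpu] = true;
	b->flush_required = true;
	if (writable)		/* dirty PTE: must flush before IO is started */
		b->writable = true;
}

static void demo_flush(struct demo_flush_batch *b)
{
	int cpu;

	if (!b->flush_required)
		return;
	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		if (b->cpu_needs_flush[cpu])
			printf("flush TLB on cpu %d\n", cpu);	/* one flush, not one per page */
	memset(b, 0, sizeof(*b));
}

int main(void)
{
	struct demo_flush_batch batch = { { false } };

	demo_queue_flush(&batch, 2, true);
	demo_queue_flush(&batch, 2, false);	/* second page, same CPU, no extra work */
	demo_flush(&batch);
	return 0;
}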
*/ if (pte_dirty(pteval)) diff --git a/mm/shmem.c b/mm/shmem.c index 8c5467e4e..64195f3c6 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -546,6 +546,21 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) } EXPORT_SYMBOL_GPL(shmem_truncate_range); +static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + struct shmem_inode_info *info = SHMEM_I(inode); + + spin_lock(&info->lock); + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + + generic_fillattr(inode, stat); + + return 0; +} + static int shmem_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); @@ -3147,6 +3162,7 @@ static const struct file_operations shmem_file_operations = { }; static const struct inode_operations shmem_inode_operations = { + .getattr = shmem_getattr, .setattr = shmem_setattr, #ifdef CONFIG_TMPFS_XATTR .setxattr = shmem_setxattr, @@ -1595,7 +1595,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, if (memcg_charge_slab(cachep, flags, cachep->gfporder)) return NULL; - page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); + page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); if (!page) { memcg_uncharge_slab(cachep, cachep->gfporder); slab_out_of_memory(cachep, flags, nodeid); @@ -3423,6 +3423,19 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) } EXPORT_SYMBOL(kmem_cache_alloc); +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) +{ + __kmem_cache_free_bulk(s, size, p); +} +EXPORT_SYMBOL(kmem_cache_free_bulk); + +bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + void **p) +{ + return __kmem_cache_alloc_bulk(s, flags, size, p); +} +EXPORT_SYMBOL(kmem_cache_alloc_bulk); + #ifdef CONFIG_TRACING void * kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) @@ -163,6 +163,15 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s); ssize_t slabinfo_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos); +/* + * Generic implementation of bulk operations + * These are useful for situations in which the allocator cannot + * perform optimizations. In that case segments of the objecct listed + * may be allocated or freed using these operations. + */ +void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); +bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); + #ifdef CONFIG_MEMCG_KMEM /* * Iterate over all memcg caches of the given root cache. The caller must hold @@ -321,7 +330,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) return cachep; pr_err("%s: Wrong slab cache. 
%s but object is from %s\n", - __func__, cachep->name, s->name); + __func__, s->name, cachep->name); WARN_ON_ONCE(1); return s; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 86831105a..5ce4faeb1 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -104,6 +104,29 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size) } #endif +void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) +{ + size_t i; + + for (i = 0; i < nr; i++) + kmem_cache_free(s, p[i]); +} + +bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, + void **p) +{ + size_t i; + + for (i = 0; i < nr; i++) { + void *x = p[i] = kmem_cache_alloc(s, flags); + if (!x) { + __kmem_cache_free_bulk(s, i, p); + return false; + } + } + return true; +} + #ifdef CONFIG_MEMCG_KMEM void slab_init_memcg_params(struct kmem_cache *s) { @@ -477,7 +500,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */ - struct cgroup_subsys_state *css = mem_cgroup_css(memcg); + struct cgroup_subsys_state *css = &memcg->css; struct memcg_cache_array *arr; struct kmem_cache *s = NULL; char *cache_name; @@ -617,6 +640,9 @@ void kmem_cache_destroy(struct kmem_cache *s) bool need_rcu_barrier = false; bool busy = false; + if (unlikely(!s)) + return; + BUG_ON(!is_root_cache(s)); get_online_cpus(); @@ -45,7 +45,7 @@ * NUMA support in SLOB is fairly simplistic, pushing most of the real * logic down to the page allocator, and simply doing the node accounting * on the upper levels. In the event that a node id is explicitly - * provided, alloc_pages_exact_node() with the specified node id is used + * provided, __alloc_pages_node() with the specified node id is used * instead. The common case (or when the node id isn't explicitly provided) * will default to the current node, as per numa_node_id(). 
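The generic __kmem_cache_alloc_bulk()/__kmem_cache_free_bulk() fallbacks added to mm/slab_common.c above simply loop over the single-object paths and undo partial work on failure, so callers get all-or-nothing semantics. The same contract restated as a runnable userspace sketch, with malloc() standing in for the slab allocator:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void demo_free_bulk(size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		free(p[i]);
}

static bool demo_alloc_bulk(size_t obj_size, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		p[i] = malloc(obj_size);
		if (!p[i]) {
			demo_free_bulk(i, p);	/* roll back so the caller sees all-or-nothing */
			return false;
		}
	}
	return true;
}

int main(void)
{
	void *objs[16];

	if (demo_alloc_bulk(64, 16, objs)) {
		puts("got 16 objects");
		demo_free_bulk(16, objs);
	}
	return 0;
}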
* @@ -193,7 +193,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) #ifdef CONFIG_NUMA if (node != NUMA_NO_NODE) - page = alloc_pages_exact_node(node, gfp, order); + page = __alloc_pages_node(node, gfp, order); else #endif page = alloc_pages(gfp, order); @@ -611,6 +611,19 @@ void kmem_cache_free(struct kmem_cache *c, void *b) } EXPORT_SYMBOL(kmem_cache_free); +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) +{ + __kmem_cache_free_bulk(s, size, p); +} +EXPORT_SYMBOL(kmem_cache_free_bulk); + +bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + void **p) +{ + return __kmem_cache_alloc_bulk(s, flags, size, p); +} +EXPORT_SYMBOL(kmem_cache_alloc_bulk); + int __kmem_cache_shutdown(struct kmem_cache *c) { /* No way to check for remaining objects */ @@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) kasan_slab_free(s, x); } +static void setup_object(struct kmem_cache *s, struct page *page, + void *object) +{ + setup_object_debug(s, page, object); + if (unlikely(s->ctor)) { + kasan_unpoison_object_data(s, object); + s->ctor(object); + kasan_poison_object_data(s, object); + } +} + /* * Slab allocation and freeing */ @@ -1323,7 +1334,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, if (node == NUMA_NO_NODE) page = alloc_pages(flags, order); else - page = alloc_pages_exact_node(node, flags, order); + page = __alloc_pages_node(node, flags, order); if (!page) memcg_uncharge_slab(s, order); @@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) struct page *page; struct kmem_cache_order_objects oo = s->oo; gfp_t alloc_gfp; + void *start, *p; + int idx, order; flags &= gfp_allowed_mask; @@ -1349,6 +1362,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) * so we fall-back to the minimum order allocation. */ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; + if ((alloc_gfp & __GFP_WAIT) && oo_order(oo) > oo_order(s->min)) + alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_WAIT; page = alloc_slab_page(s, alloc_gfp, node, oo); if (unlikely(!page)) { @@ -1359,13 +1374,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) * Try a lower order alloc if possible */ page = alloc_slab_page(s, alloc_gfp, node, oo); - - if (page) - stat(s, ORDER_FALLBACK); + if (unlikely(!page)) + goto out; + stat(s, ORDER_FALLBACK); } - if (kmemcheck_enabled && page - && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { + if (kmemcheck_enabled && + !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { int pages = 1 << oo_order(oo); kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node); @@ -1380,51 +1395,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) kmemcheck_mark_unallocated_pages(page, pages); } - if (flags & __GFP_WAIT) - local_irq_disable(); - if (!page) - return NULL; - page->objects = oo_objects(oo); - mod_zone_page_state(page_zone(page), - (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, - 1 << oo_order(oo)); - - return page; -} - -static void setup_object(struct kmem_cache *s, struct page *page, - void *object) -{ - setup_object_debug(s, page, object); - if (unlikely(s->ctor)) { - kasan_unpoison_object_data(s, object); - s->ctor(object); - kasan_poison_object_data(s, object); - } -} - -static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) -{ - struct page *page; - void *start; - void *p; - int order; - int idx; - - if (unlikely(flags & GFP_SLAB_BUG_MASK)) { - pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK); - BUG(); - } - - page = allocate_slab(s, - flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); - if (!page) - goto out; order = compound_order(page); - inc_slabs_node(s, page_to_nid(page), page->objects); page->slab_cache = s; __SetPageSlab(page); if (page_is_pfmemalloc(page)) @@ -1448,10 +1421,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) page->freelist = start; page->inuse = page->objects; page->frozen = 1; + out: + if (flags & __GFP_WAIT) + local_irq_disable(); + if (!page) + return NULL; + + mod_zone_page_state(page_zone(page), + (s->flags & SLAB_RECLAIM_ACCOUNT) ? + NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, + 1 << oo_order(oo)); + + inc_slabs_node(s, page_to_nid(page), page->objects); + return page; } +static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) +{ + if (unlikely(flags & GFP_SLAB_BUG_MASK)) { + pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK); + BUG(); + } + + return allocate_slab(s, + flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); +} + static void __free_slab(struct kmem_cache *s, struct page *page) { int order = compound_order(page); @@ -2712,7 +2709,7 @@ redo: * Determine the currently cpus per cpu slab. * The cpu may change afterward. However that does not matter since * data is retrieved via this pointer. If we are on the same cpu - * during the cmpxchg then the free will succedd. + * during the cmpxchg then the free will succeed. */ do { tid = this_cpu_read(s->cpu_slab->tid); @@ -2750,6 +2747,113 @@ void kmem_cache_free(struct kmem_cache *s, void *x) } EXPORT_SYMBOL(kmem_cache_free); +/* Note that interrupts must be enabled when calling this function. */ +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) +{ + struct kmem_cache_cpu *c; + struct page *page; + int i; + + local_irq_disable(); + c = this_cpu_ptr(s->cpu_slab); + + for (i = 0; i < size; i++) { + void *object = p[i]; + + BUG_ON(!object); + /* kmem cache debug support */ + s = cache_from_obj(s, object); + if (unlikely(!s)) + goto exit; + slab_free_hook(s, object); + + page = virt_to_head_page(object); + + if (c->page == page) { + /* Fastpath: local CPU free */ + set_freepointer(s, object, c->freelist); + c->freelist = object; + } else { + c->tid = next_tid(c->tid); + local_irq_enable(); + /* Slowpath: overhead locked cmpxchg_double_slab */ + __slab_free(s, page, object, _RET_IP_); + local_irq_disable(); + c = this_cpu_ptr(s->cpu_slab); + } + } +exit: + c->tid = next_tid(c->tid); + local_irq_enable(); +} +EXPORT_SYMBOL(kmem_cache_free_bulk); + +/* Note that interrupts must be enabled when calling this function. */ +bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, + void **p) +{ + struct kmem_cache_cpu *c; + int i; + + /* + * Drain objects in the per cpu slab, while disabling local + * IRQs, which protects against PREEMPT and interrupts + * handlers invoking normal fastpath. 
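allocate_slab() above now clears __GFP_WAIT (and adds __GFP_NOMEMALLOC) for the initial higher-order attempt, so that attempt cannot block or dip into reserves, and a failure simply falls back to the minimum order. A userspace caricature of the try-big-then-fall-back pattern, with malloc() standing in for the page allocator:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_preferred_then_min(size_t preferred, size_t minimum,
				      size_t *got)
{
	void *p = malloc(preferred);	/* opportunistic large attempt */

	if (p) {
		*got = preferred;
		return p;
	}
	p = malloc(minimum);		/* fallback to the minimum size */
	*got = p ? minimum : 0;
	return p;
}

int main(void)
{
	size_t got;
	void *p = alloc_preferred_then_min(1UL << 20, 1UL << 12, &got);

	if (p)
		printf("got %zu bytes\n", got);
	free(p);
	return 0;
}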
+ */ + local_irq_disable(); + c = this_cpu_ptr(s->cpu_slab); + + for (i = 0; i < size; i++) { + void *object = c->freelist; + + if (unlikely(!object)) { + local_irq_enable(); + /* + * Invoking slow path likely have side-effect + * of re-populating per CPU c->freelist + */ + p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, + _RET_IP_, c); + if (unlikely(!p[i])) { + __kmem_cache_free_bulk(s, i, p); + return false; + } + local_irq_disable(); + c = this_cpu_ptr(s->cpu_slab); + continue; /* goto for-loop */ + } + + /* kmem_cache debug support */ + s = slab_pre_alloc_hook(s, flags); + if (unlikely(!s)) { + __kmem_cache_free_bulk(s, i, p); + c->tid = next_tid(c->tid); + local_irq_enable(); + return false; + } + + c->freelist = get_freepointer(s, object); + p[i] = object; + + /* kmem_cache debug support */ + slab_post_alloc_hook(s, flags, object); + } + c->tid = next_tid(c->tid); + local_irq_enable(); + + /* Clear memory outside IRQ disabled fastpath loop */ + if (unlikely(flags & __GFP_ZERO)) { + int j; + + for (j = 0; j < i; j++) + memset(p[j], 0, s->object_size); + } + + return true; +} +EXPORT_SYMBOL(kmem_cache_alloc_bulk); + + /* * Object placement in a slab is made very easy because we always start at * offset 0. If we tune the size of the object to the alignment then we can @@ -5181,7 +5285,7 @@ static int sysfs_slab_add(struct kmem_cache *s) s->kobj.kset = cache_kset(s); err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); if (err) - goto out_put_kobj; + goto out; err = sysfs_create_group(&s->kobj, &slab_attr_group); if (err) @@ -5208,8 +5312,6 @@ out: return err; out_del_kobj: kobject_del(&s->kobj); -out_put_kobj: - kobject_put(&s->kobj); goto out; } @@ -32,6 +32,7 @@ #include <linux/gfp.h> #include <linux/uio.h> #include <linux/hugetlb.h> +#include <linux/page_idle.h> #include "internal.h" @@ -622,6 +623,8 @@ void mark_page_accessed(struct page *page) } else if (!PageReferenced(page)) { SetPageReferenced(page); } + if (page_is_idle(page)) + clear_page_idle(page); } EXPORT_SYMBOL(mark_page_accessed); diff --git a/mm/swap_state.c b/mm/swap_state.c index 8bc8e6613..d504adb7f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -288,17 +288,14 @@ struct page * lookup_swap_cache(swp_entry_t entry) return page; } -/* - * Locate a page of swap in physical memory, reserving swap cache space - * and reading the disk if it is not already cached. - * A failure return means that either the page allocation failed or that - * the swap entry is no longer in use. - */ -struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + struct vm_area_struct *vma, unsigned long addr, + bool *new_page_allocated) { struct page *found_page, *new_page = NULL; + struct address_space *swapper_space = swap_address_space(entry); int err; + *new_page_allocated = false; do { /* @@ -306,8 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * called after lookup_swap_cache() failed, re-calling * that would confuse statistics. */ - found_page = find_get_page(swap_address_space(entry), - entry.val); + found_page = find_get_page(swapper_space, entry.val); if (found_page) break; @@ -366,7 +362,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * Initiate read into locked page and return. 
*/ lru_cache_add_anon(new_page); - swap_readpage(new_page); + *new_page_allocated = true; return new_page; } radix_tree_preload_end(); @@ -384,6 +380,25 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, return found_page; } +/* + * Locate a page of swap in physical memory, reserving swap cache space + * and reading the disk if it is not already cached. + * A failure return means that either the page allocation failed or that + * the swap entry is no longer in use. + */ +struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + struct vm_area_struct *vma, unsigned long addr) +{ + bool page_was_allocated; + struct page *retpage = __read_swap_cache_async(entry, gfp_mask, + vma, addr, &page_was_allocated); + + if (page_was_allocated) + swap_readpage(retpage); + + return retpage; +} + static unsigned long swapin_nr_pages(unsigned long offset) { static unsigned long prev_offset; diff --git a/mm/swapfile.c b/mm/swapfile.c index 3ce3f2978..6c67b4265 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -929,6 +929,48 @@ int page_swapcount(struct page *page) } /* + * How many references to @entry are currently swapped out? + * This considers COUNT_CONTINUED so it returns exact answer. + */ +int swp_swapcount(swp_entry_t entry) +{ + int count, tmp_count, n; + struct swap_info_struct *p; + struct page *page; + pgoff_t offset; + unsigned char *map; + + p = swap_info_get(entry); + if (!p) + return 0; + + count = swap_count(p->swap_map[swp_offset(entry)]); + if (!(count & COUNT_CONTINUED)) + goto out; + + count &= ~COUNT_CONTINUED; + n = SWAP_MAP_MAX + 1; + + offset = swp_offset(entry); + page = vmalloc_to_page(p->swap_map + offset); + offset &= ~PAGE_MASK; + VM_BUG_ON(page_private(page) != SWP_CONTINUED); + + do { + page = list_entry(page->lru.next, struct page, lru); + map = kmap_atomic(page); + tmp_count = map[offset]; + kunmap_atomic(map); + + count += (tmp_count & ~COUNT_CONTINUED) * n; + n *= (SWAP_CONT_MAX + 1); + } while (tmp_count & COUNT_CONTINUED); +out: + spin_unlock(&p->lock); + return count; +} + +/* * We can write to an anon page without COW if there are no other references * to it. 
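swp_swapcount() above walks the swap-count continuation pages and accumulates an exact reference count, treating each continuation byte as a higher-order digit. The accumulation, replayed with invented numbers (the constants here are illustrative, not the kernel's exact values):

#include <stdio.h>

int main(void)
{
	/* illustrative per-byte limits standing in for SWAP_MAP_MAX / SWAP_CONT_MAX */
	const long map_max = 0x3e, cont_max = 0x7f;
	int cont_counts[] = { 3, 2 };	/* counts read from two continuation pages */
	long count = 10;		/* low bits from the main swap_map entry */
	long n = map_max + 1;
	unsigned long i;

	for (i = 0; i < sizeof(cont_counts) / sizeof(cont_counts[0]); i++) {
		count += cont_counts[i] * n;	/* each page adds a higher-order "digit" */
		n *= cont_max + 1;
	}
	printf("swp_swapcount() would return %ld\n", count);
	return 0;
}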
And as a side-effect, free up its swap: because the old content * on disk will never be read, and seeking back there to write new content @@ -2197,11 +2239,10 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) if (S_ISBLK(inode->i_mode)) { p->bdev = bdgrab(I_BDEV(inode)); error = blkdev_get(p->bdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, - sys_swapon); + FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); if (error < 0) { p->bdev = NULL; - return -EINVAL; + return error; } p->old_block_size = block_size(p->bdev); error = set_blocksize(p->bdev, PAGE_SIZE); @@ -2402,7 +2443,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) struct filename *name; struct file *swap_file = NULL; struct address_space *mapping; - int i; int prio; int error; union swap_header *swap_header; @@ -2442,19 +2482,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->swap_file = swap_file; mapping = swap_file->f_mapping; - - for (i = 0; i < nr_swapfiles; i++) { - struct swap_info_struct *q = swap_info[i]; - - if (q == p || !q->swap_file) - continue; - if (mapping == q->swap_file->f_mapping) { - error = -EBUSY; - goto bad_swap; - } - } - inode = mapping->host; + /* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */ error = claim_swapfile(p, inode); if (unlikely(error)) @@ -2487,6 +2516,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; } if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { + int cpu; + p->flags |= SWP_SOLIDSTATE; /* * select a random position to start with to help wear leveling @@ -2505,9 +2536,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = -ENOMEM; goto bad_swap; } - for_each_possible_cpu(i) { + for_each_possible_cpu(cpu) { struct percpu_cluster *cluster; - cluster = per_cpu_ptr(p->percpu_cluster, i); + cluster = per_cpu_ptr(p->percpu_cluster, cpu); cluster_set_null(&cluster->index); } } diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c new file mode 100644 index 000000000..77fee9325 --- /dev/null +++ b/mm/userfaultfd.c @@ -0,0 +1,308 @@ +/* + * mm/userfaultfd.c + * + * Copyright (C) 2015 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/rmap.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/userfaultfd_k.h> +#include <linux/mmu_notifier.h> +#include <asm/tlbflush.h> +#include "internal.h" + +static int mcopy_atomic_pte(struct mm_struct *dst_mm, + pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep) +{ + struct mem_cgroup *memcg; + pte_t _dst_pte, *dst_pte; + spinlock_t *ptl; + void *page_kaddr; + int ret; + struct page *page; + + if (!*pagep) { + ret = -ENOMEM; + page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr); + if (!page) + goto out; + + page_kaddr = kmap_atomic(page); + ret = copy_from_user(page_kaddr, + (const void __user *) src_addr, + PAGE_SIZE); + kunmap_atomic(page_kaddr); + + /* fallback to copy_from_user outside mmap_sem */ + if (unlikely(ret)) { + ret = -EFAULT; + *pagep = page; + /* don't free the page */ + goto out; + } + } else { + page = *pagep; + *pagep = NULL; + } + + /* + * The memory barrier inside __SetPageUptodate makes sure that + * preceeding stores to the page contents become visible before + * the set_pte_at() write. 
+ */ + __SetPageUptodate(page); + + ret = -ENOMEM; + if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg)) + goto out_release; + + _dst_pte = mk_pte(page, dst_vma->vm_page_prot); + if (dst_vma->vm_flags & VM_WRITE) + _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); + + ret = -EEXIST; + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); + if (!pte_none(*dst_pte)) + goto out_release_uncharge_unlock; + + inc_mm_counter(dst_mm, MM_ANONPAGES); + page_add_new_anon_rmap(page, dst_vma, dst_addr); + mem_cgroup_commit_charge(page, memcg, false); + lru_cache_add_active_or_unevictable(page, dst_vma); + + set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(dst_vma, dst_addr, dst_pte); + + pte_unmap_unlock(dst_pte, ptl); + ret = 0; +out: + return ret; +out_release_uncharge_unlock: + pte_unmap_unlock(dst_pte, ptl); + mem_cgroup_cancel_charge(page, memcg); +out_release: + page_cache_release(page); + goto out; +} + +static int mfill_zeropage_pte(struct mm_struct *dst_mm, + pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr) +{ + pte_t _dst_pte, *dst_pte; + spinlock_t *ptl; + int ret; + + _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), + dst_vma->vm_page_prot)); + ret = -EEXIST; + dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); + if (!pte_none(*dst_pte)) + goto out_unlock; + set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); + /* No need to invalidate - it was non-present before */ + update_mmu_cache(dst_vma, dst_addr, dst_pte); + ret = 0; +out_unlock: + pte_unmap_unlock(dst_pte, ptl); + return ret; +} + +static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (pud) + /* + * Note that we didn't run this because the pmd was + * missing, the *pmd may be already established and in + * turn it may also be a trans_huge_pmd. + */ + pmd = pmd_alloc(mm, pud, address); + return pmd; +} + +static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, + unsigned long dst_start, + unsigned long src_start, + unsigned long len, + bool zeropage) +{ + struct vm_area_struct *dst_vma; + ssize_t err; + pmd_t *dst_pmd; + unsigned long src_addr, dst_addr; + long copied; + struct page *page; + + /* + * Sanitize the command parameters: + */ + BUG_ON(dst_start & ~PAGE_MASK); + BUG_ON(len & ~PAGE_MASK); + + /* Does the address range wrap, or is the span zero-sized? */ + BUG_ON(src_start + len <= src_start); + BUG_ON(dst_start + len <= dst_start); + + src_addr = src_start; + dst_addr = dst_start; + copied = 0; + page = NULL; +retry: + down_read(&dst_mm->mmap_sem); + + /* + * Make sure the vma is not shared, that the dst range is + * both valid and fully within a single existing vma. + */ + err = -EINVAL; + dst_vma = find_vma(dst_mm, dst_start); + if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) + goto out_unlock; + if (dst_start < dst_vma->vm_start || + dst_start + len > dst_vma->vm_end) + goto out_unlock; + + /* + * Be strict and only allow __mcopy_atomic on userfaultfd + * registered ranges to prevent userland errors going + * unnoticed. As far as the VM consistency is concerned, it + * would be perfectly safe to remove this check, but there's + * no useful usage for __mcopy_atomic ouside of userfaultfd + * registered ranges. This is after all why these are ioctls + * belonging to the userfaultfd and not syscalls. 
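For context on how mcopy_atomic_pte() above is reached: userspace resolves a fault on a registered range with the UFFDIO_COPY ioctl, which funnels into mcopy_atomic(). A minimal fragment, assuming the userfaultfd is already open and the destination range registered, with error handling omitted:

#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>

/*
 * Resolve a fault reported on 'dst_page' by atomically copying in the
 * contents prepared in 'src_page'.
 */
static int demo_resolve_fault(int uffd, void *dst_page, const void *src_page,
			      size_t page_size)
{
	struct uffdio_copy copy;

	memset(&copy, 0, sizeof(copy));
	copy.dst = (unsigned long)dst_page;	/* faulting page in the registered range */
	copy.src = (unsigned long)src_page;	/* local buffer holding the contents */
	copy.len = page_size;
	copy.mode = 0;

	return ioctl(uffd, UFFDIO_COPY, &copy);	/* ends up in mcopy_atomic() */
}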
+ */ + if (!dst_vma->vm_userfaultfd_ctx.ctx) + goto out_unlock; + + /* + * FIXME: only allow copying on anonymous vmas, tmpfs should + * be added. + */ + if (dst_vma->vm_ops) + goto out_unlock; + + /* + * Ensure the dst_vma has a anon_vma or this page + * would get a NULL anon_vma when moved in the + * dst_vma. + */ + err = -ENOMEM; + if (unlikely(anon_vma_prepare(dst_vma))) + goto out_unlock; + + while (src_addr < src_start + len) { + pmd_t dst_pmdval; + + BUG_ON(dst_addr >= dst_start + len); + + dst_pmd = mm_alloc_pmd(dst_mm, dst_addr); + if (unlikely(!dst_pmd)) { + err = -ENOMEM; + break; + } + + dst_pmdval = pmd_read_atomic(dst_pmd); + /* + * If the dst_pmd is mapped as THP don't + * override it and just be strict. + */ + if (unlikely(pmd_trans_huge(dst_pmdval))) { + err = -EEXIST; + break; + } + if (unlikely(pmd_none(dst_pmdval)) && + unlikely(__pte_alloc(dst_mm, dst_vma, dst_pmd, + dst_addr))) { + err = -ENOMEM; + break; + } + /* If an huge pmd materialized from under us fail */ + if (unlikely(pmd_trans_huge(*dst_pmd))) { + err = -EFAULT; + break; + } + + BUG_ON(pmd_none(*dst_pmd)); + BUG_ON(pmd_trans_huge(*dst_pmd)); + + if (!zeropage) + err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, + dst_addr, src_addr, &page); + else + err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, + dst_addr); + + cond_resched(); + + if (unlikely(err == -EFAULT)) { + void *page_kaddr; + + up_read(&dst_mm->mmap_sem); + BUG_ON(!page); + + page_kaddr = kmap(page); + err = copy_from_user(page_kaddr, + (const void __user *) src_addr, + PAGE_SIZE); + kunmap(page); + if (unlikely(err)) { + err = -EFAULT; + goto out; + } + goto retry; + } else + BUG_ON(page); + + if (!err) { + dst_addr += PAGE_SIZE; + src_addr += PAGE_SIZE; + copied += PAGE_SIZE; + + if (fatal_signal_pending(current)) + err = -EINTR; + } + if (err) + break; + } + +out_unlock: + up_read(&dst_mm->mmap_sem); +out: + if (page) + page_cache_release(page); + BUG_ON(copied < 0); + BUG_ON(err > 0); + BUG_ON(!copied && !err); + return copied ? copied : err; +} + +ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, + unsigned long src_start, unsigned long len) +{ + return __mcopy_atomic(dst_mm, dst_start, src_start, len, false); +} + +ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, + unsigned long len) +{ + return __mcopy_atomic(dst_mm, start, 0, len, true); +} diff --git a/mm/vmscan.c b/mm/vmscan.c index bf542189d..4e05caed1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc) if (!memcg) return true; #ifdef CONFIG_CGROUP_WRITEBACK - if (cgroup_on_dfl(mem_cgroup_css(memcg)->cgroup)) + if (cgroup_on_dfl(memcg->css.cgroup)) return true; #endif return false; @@ -985,7 +985,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * - * 3) Legacy memcg encounters a page that is not already marked + * 3) Legacy memcg encounters a page that is already marked * PageReclaim. 
memcg does not have any dirty pages * throttling so we could easily OOM just because too many * pages are in writeback and there is nothing else to @@ -1015,12 +1015,15 @@ static unsigned long shrink_page_list(struct list_head *page_list, */ SetPageReclaim(page); nr_writeback++; - goto keep_locked; /* Case 3 above */ } else { + unlock_page(page); wait_on_page_writeback(page); + /* then go back and try same page again */ + list_add_tail(&page->lru, page_list); + continue; } } @@ -1057,7 +1060,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { - switch (try_to_unmap(page, ttu_flags)) { + switch (try_to_unmap(page, + ttu_flags|TTU_BATCH_FLUSH)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: @@ -1097,7 +1101,12 @@ static unsigned long shrink_page_list(struct list_head *page_list, if (!sc->may_writepage) goto keep_locked; - /* Page is dirty, try to write it out here */ + /* + * Page is dirty. Flush the TLB if a writable entry + * potentially exists to avoid CPU writes after IO + * starts and then write it out here. + */ + try_to_unmap_flush_dirty(); switch (pageout(page, mapping, sc)) { case PAGE_KEEP: goto keep_locked; @@ -1208,6 +1217,7 @@ keep: } mem_cgroup_uncharge_list(&free_pages); + try_to_unmap_flush(); free_hot_cold_page_list(&free_pages, true); list_splice(&ret_pages, page_list); @@ -1352,7 +1362,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, unsigned long nr_taken = 0; unsigned long scan; - for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { + for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan && + !list_empty(src); scan++) { struct page *page; int nr_pages; @@ -2151,6 +2162,23 @@ out: } } +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH +static void init_tlb_ubc(void) +{ + /* + * This deliberately does not clear the cpumask as it's expensive + * and unnecessary. If there happens to be data in there then the + * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and + * then will be cleared. + */ + current->tlb_ubc.flush_required = false; +} +#else +static inline void init_tlb_ubc(void) +{ +} +#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ + /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. */ @@ -2185,6 +2213,8 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness, scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && sc->priority == DEF_PRIORITY); + init_tlb_ubc(); + blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { diff --git a/mm/vmstat.c b/mm/vmstat.c index 095cc4bc3..8881db3ec 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1366,15 +1366,16 @@ static cpumask_var_t cpu_stat_off; static void vmstat_update(struct work_struct *w) { - if (refresh_cpu_vm_stats()) + if (refresh_cpu_vm_stats()) { /* * Counters were updated so we expect more updates * to occur in the future. Keep on running the * update worker thread. */ - schedule_delayed_work(this_cpu_ptr(&vmstat_work), + schedule_delayed_work_on(smp_processor_id(), + this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); - else { + } else { /* * We did not update any counters so the app may be in * a mode where it does not cause counter updates. 
@@ -96,10 +96,10 @@ struct zbud_pool { struct list_head buddied; struct list_head lru; u64 pages_nr; - struct zbud_ops *ops; + const struct zbud_ops *ops; #ifdef CONFIG_ZPOOL struct zpool *zpool; - struct zpool_ops *zpool_ops; + const struct zpool_ops *zpool_ops; #endif }; @@ -133,12 +133,12 @@ static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle) return -ENOENT; } -static struct zbud_ops zbud_zpool_ops = { +static const struct zbud_ops zbud_zpool_ops = { .evict = zbud_zpool_evict }; static void *zbud_zpool_create(char *name, gfp_t gfp, - struct zpool_ops *zpool_ops, + const struct zpool_ops *zpool_ops, struct zpool *zpool) { struct zbud_pool *pool; @@ -302,7 +302,7 @@ static int num_free_chunks(struct zbud_header *zhdr) * Return: pointer to the new zbud pool or NULL if the metadata allocation * failed. */ -struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops) +struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops) { struct zbud_pool *pool; int i; diff --git a/mm/zpool.c b/mm/zpool.c index 722a4f60e..8f670d3e8 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -22,7 +22,7 @@ struct zpool { struct zpool_driver *driver; void *pool; - struct zpool_ops *ops; + const struct zpool_ops *ops; struct list_head list; }; @@ -100,6 +100,39 @@ static void zpool_put_driver(struct zpool_driver *driver) } /** + * zpool_has_pool() - Check if the pool driver is available + * @type The type of the zpool to check (e.g. zbud, zsmalloc) + * + * This checks if the @type pool driver is available. This will try to load + * the requested module, if needed, but there is no guarantee the module will + * still be loaded and available immediately after calling. If this returns + * true, the caller should assume the pool is available, but must be prepared + * to handle the @zpool_create_pool() returning failure. However if this + * returns false, the caller should assume the requested pool type is not + * available; either the requested pool type module does not exist, or could + * not be loaded, and calling @zpool_create_pool() with the pool type will + * fail. + * + * Returns: true if @type pool is available, false if not + */ +bool zpool_has_pool(char *type) +{ + struct zpool_driver *driver = zpool_get_driver(type); + + if (!driver) { + request_module("zpool-%s", type); + driver = zpool_get_driver(type); + } + + if (!driver) + return false; + + zpool_put_driver(driver); + return true; +} +EXPORT_SYMBOL(zpool_has_pool); + +/** * zpool_create_pool() - Create a new zpool * @type The type of the zpool to create (e.g. zbud, zsmalloc) * @name The name of the zpool (e.g. zram0, zswap) @@ -115,7 +148,7 @@ static void zpool_put_driver(struct zpool_driver *driver) * Returns: New zpool on success, NULL on failure. 
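A hedged usage sketch for the zpool_has_pool() helper added above (kernel context only, not standalone code; the fallback backend name is just an example): a zpool user can probe a requested backend up front and fall back instead of failing later in zpool_create_pool():

/* illustration only: probe the requested backend, fall back to zbud */
static bool demo_backend_available(char *requested)
{
	if (zpool_has_pool(requested))
		return true;
	pr_info("zpool backend %s unavailable, trying zbud\n", requested);
	return zpool_has_pool("zbud");
}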
*/ struct zpool *zpool_create_pool(char *type, char *name, gfp_t gfp, - struct zpool_ops *ops) + const struct zpool_ops *ops) { struct zpool_driver *driver; struct zpool *zpool; @@ -320,20 +353,6 @@ u64 zpool_get_total_size(struct zpool *zpool) return zpool->driver->total_size(zpool->pool); } -static int __init init_zpool(void) -{ - pr_info("loaded\n"); - return 0; -} - -static void __exit exit_zpool(void) -{ - pr_info("unloaded\n"); -} - -module_init(init_zpool); -module_exit(exit_zpool); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); MODULE_DESCRIPTION("Common API for compressed memory storage"); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 0a7f81aa2..f135b1b6f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -169,14 +169,12 @@ enum zs_stat_type { NR_ZS_STAT_TYPE, }; -#ifdef CONFIG_ZSMALLOC_STAT - -static struct dentry *zs_stat_root; - struct zs_size_stat { unsigned long objs[NR_ZS_STAT_TYPE]; }; +#ifdef CONFIG_ZSMALLOC_STAT +static struct dentry *zs_stat_root; #endif /* @@ -201,6 +199,8 @@ static int zs_size_classes; static const int fullness_threshold_frac = 4; struct size_class { + spinlock_t lock; + struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; /* * Size of objects stored in this class. Must be multiple * of ZS_ALIGN. @@ -210,16 +210,10 @@ struct size_class { /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */ int pages_per_zspage; - /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ - bool huge; - -#ifdef CONFIG_ZSMALLOC_STAT struct zs_size_stat stats; -#endif - - spinlock_t lock; - struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; + /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ + bool huge; }; /* @@ -251,6 +245,15 @@ struct zs_pool { gfp_t flags; /* allocation flags used when growing pool */ atomic_long_t pages_allocated; + struct zs_pool_stats stats; + + /* Compact classes */ + struct shrinker shrinker; + /* + * To signify that register_shrinker() was successful + * and unregister_shrinker() will not Oops. 
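The zbud and zpool hunks above constify the operations tables; since the tables are only ever read, marking them const lets the compiler place them in read-only data and documents the contract. A trivial standalone version of the same idea:

#include <stdio.h>

struct demo_ops {
	int (*evict)(unsigned long handle);
};

static int demo_evict(unsigned long handle)
{
	printf("evicting handle %lu\n", handle);
	return 0;
}

/* never written after initialisation, so it can live in read-only data */
static const struct demo_ops demo_pool_ops = {
	.evict = demo_evict,
};

int main(void)
{
	return demo_pool_ops.evict(42);
}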
+ */ + bool shrinker_enabled; #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif @@ -285,8 +288,7 @@ static int create_handle_cache(struct zs_pool *pool) static void destroy_handle_cache(struct zs_pool *pool) { - if (pool->handle_cachep) - kmem_cache_destroy(pool->handle_cachep); + kmem_cache_destroy(pool->handle_cachep); } static unsigned long alloc_handle(struct zs_pool *pool) @@ -309,7 +311,8 @@ static void record_obj(unsigned long handle, unsigned long obj) #ifdef CONFIG_ZPOOL -static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops, +static void *zs_zpool_create(char *name, gfp_t gfp, + const struct zpool_ops *zpool_ops, struct zpool *zpool) { return zs_create_pool(name, gfp); @@ -441,8 +444,6 @@ static int get_size_class_index(int size) return min(zs_size_classes - 1, idx); } -#ifdef CONFIG_ZSMALLOC_STAT - static inline void zs_stat_inc(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { @@ -461,6 +462,8 @@ static inline unsigned long zs_stat_get(struct size_class *class, return class->stats.objs[type]; } +#ifdef CONFIG_ZSMALLOC_STAT + static int __init zs_stat_init(void) { if (!debugfs_initialized()) @@ -576,23 +579,6 @@ static void zs_pool_stat_destroy(struct zs_pool *pool) } #else /* CONFIG_ZSMALLOC_STAT */ - -static inline void zs_stat_inc(struct size_class *class, - enum zs_stat_type type, unsigned long cnt) -{ -} - -static inline void zs_stat_dec(struct size_class *class, - enum zs_stat_type type, unsigned long cnt) -{ -} - -static inline unsigned long zs_stat_get(struct size_class *class, - enum zs_stat_type type) -{ - return 0; -} - static int __init zs_stat_init(void) { return 0; @@ -610,7 +596,6 @@ static inline int zs_pool_stat_create(char *name, struct zs_pool *pool) static inline void zs_pool_stat_destroy(struct zs_pool *pool) { } - #endif @@ -658,13 +643,22 @@ static void insert_zspage(struct page *page, struct size_class *class, if (fullness >= _ZS_NR_FULLNESS_GROUPS) return; - head = &class->fullness_list[fullness]; - if (*head) - list_add_tail(&page->lru, &(*head)->lru); - - *head = page; zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ? CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); + + head = &class->fullness_list[fullness]; + if (!*head) { + *head = page; + return; + } + + /* + * We want to see more ZS_FULL pages and less almost + * empty/full. Put pages with higher ->inuse first. + */ + list_add_tail(&page->lru, &(*head)->lru); + if (page->inuse >= (*head)->inuse) + *head = page; } /* @@ -1495,7 +1489,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) } EXPORT_SYMBOL_GPL(zs_free); -static void zs_object_copy(unsigned long src, unsigned long dst, +static void zs_object_copy(unsigned long dst, unsigned long src, struct size_class *class) { struct page *s_page, *d_page; @@ -1602,8 +1596,6 @@ struct zs_compact_control { /* Starting object index within @s_page which used for live object * in the subpage. 
*/ int index; - /* how many of objects are migrated */ - int nr_migrated; }; static int migrate_zspage(struct zs_pool *pool, struct size_class *class, @@ -1614,7 +1606,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, struct page *s_page = cc->s_page; struct page *d_page = cc->d_page; unsigned long index = cc->index; - int nr_migrated = 0; int ret = 0; while (1) { @@ -1636,23 +1627,21 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, used_obj = handle_to_obj(handle); free_obj = obj_malloc(d_page, class, handle); - zs_object_copy(used_obj, free_obj, class); + zs_object_copy(free_obj, used_obj, class); index++; record_obj(handle, free_obj); unpin_tag(handle); obj_free(pool, class, used_obj); - nr_migrated++; } /* Remember last position in this iteration */ cc->s_page = s_page; cc->index = index; - cc->nr_migrated = nr_migrated; return ret; } -static struct page *alloc_target_page(struct size_class *class) +static struct page *isolate_target_page(struct size_class *class) { int i; struct page *page; @@ -1668,8 +1657,17 @@ static struct page *alloc_target_page(struct size_class *class) return page; } -static void putback_zspage(struct zs_pool *pool, struct size_class *class, - struct page *first_page) +/* + * putback_zspage - add @first_page into right class's fullness list + * @pool: target pool + * @class: destination class + * @first_page: target page + * + * Return @fist_page's fullness_group + */ +static enum fullness_group putback_zspage(struct zs_pool *pool, + struct size_class *class, + struct page *first_page) { enum fullness_group fullness; @@ -1687,50 +1685,72 @@ static void putback_zspage(struct zs_pool *pool, struct size_class *class, free_zspage(first_page); } + + return fullness; } static struct page *isolate_source_page(struct size_class *class) { - struct page *page; + int i; + struct page *page = NULL; - page = class->fullness_list[ZS_ALMOST_EMPTY]; - if (page) - remove_zspage(page, class, ZS_ALMOST_EMPTY); + for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) { + page = class->fullness_list[i]; + if (!page) + continue; + + remove_zspage(page, class, i); + break; + } return page; } -static unsigned long __zs_compact(struct zs_pool *pool, - struct size_class *class) +/* + * + * Based on the number of unused allocated objects calculate + * and return the number of pages that we can free. + */ +static unsigned long zs_can_compact(struct size_class *class) +{ + unsigned long obj_wasted; + + obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) - + zs_stat_get(class, OBJ_USED); + + obj_wasted /= get_maxobj_per_zspage(class->size, + class->pages_per_zspage); + + return obj_wasted * class->pages_per_zspage; +} + +static void __zs_compact(struct zs_pool *pool, struct size_class *class) { - int nr_to_migrate; struct zs_compact_control cc; struct page *src_page; struct page *dst_page = NULL; - unsigned long nr_total_migrated = 0; spin_lock(&class->lock); while ((src_page = isolate_source_page(class))) { BUG_ON(!is_first_page(src_page)); - /* The goal is to migrate all live objects in source page */ - nr_to_migrate = src_page->inuse; + if (!zs_can_compact(class)) + break; + cc.index = 0; cc.s_page = src_page; - while ((dst_page = alloc_target_page(class))) { + while ((dst_page = isolate_target_page(class))) { cc.d_page = dst_page; /* - * If there is no more space in dst_page, try to - * allocate another zspage. + * If there is no more space in dst_page, resched + * and see if anyone had allocated another zspage. 
*/ if (!migrate_zspage(pool, class, &cc)) break; putback_zspage(pool, class, dst_page); - nr_total_migrated += cc.nr_migrated; - nr_to_migrate -= cc.nr_migrated; } /* Stop if we couldn't find slot */ @@ -1738,9 +1758,9 @@ static unsigned long __zs_compact(struct zs_pool *pool, break; putback_zspage(pool, class, dst_page); - putback_zspage(pool, class, src_page); + if (putback_zspage(pool, class, src_page) == ZS_EMPTY) + pool->stats.pages_compacted += class->pages_per_zspage; spin_unlock(&class->lock); - nr_total_migrated += cc.nr_migrated; cond_resched(); spin_lock(&class->lock); } @@ -1749,14 +1769,11 @@ static unsigned long __zs_compact(struct zs_pool *pool, putback_zspage(pool, class, src_page); spin_unlock(&class->lock); - - return nr_total_migrated; } unsigned long zs_compact(struct zs_pool *pool) { int i; - unsigned long nr_migrated = 0; struct size_class *class; for (i = zs_size_classes - 1; i >= 0; i--) { @@ -1765,13 +1782,80 @@ unsigned long zs_compact(struct zs_pool *pool) continue; if (class->index != i) continue; - nr_migrated += __zs_compact(pool, class); + __zs_compact(pool, class); } - return nr_migrated; + return pool->stats.pages_compacted; } EXPORT_SYMBOL_GPL(zs_compact); +void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) +{ + memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); +} +EXPORT_SYMBOL_GPL(zs_pool_stats); + +static unsigned long zs_shrinker_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + unsigned long pages_freed; + struct zs_pool *pool = container_of(shrinker, struct zs_pool, + shrinker); + + pages_freed = pool->stats.pages_compacted; + /* + * Compact classes and calculate compaction delta. + * Can run concurrently with a manually triggered + * (by user) compaction. + */ + pages_freed = zs_compact(pool) - pages_freed; + + return pages_freed ? pages_freed : SHRINK_STOP; +} + +static unsigned long zs_shrinker_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + int i; + struct size_class *class; + unsigned long pages_to_free = 0; + struct zs_pool *pool = container_of(shrinker, struct zs_pool, + shrinker); + + if (!pool->shrinker_enabled) + return 0; + + for (i = zs_size_classes - 1; i >= 0; i--) { + class = pool->size_class[i]; + if (!class) + continue; + if (class->index != i) + continue; + + pages_to_free += zs_can_compact(class); + } + + return pages_to_free; +} + +static void zs_unregister_shrinker(struct zs_pool *pool) +{ + if (pool->shrinker_enabled) { + unregister_shrinker(&pool->shrinker); + pool->shrinker_enabled = false; + } +} + +static int zs_register_shrinker(struct zs_pool *pool) +{ + pool->shrinker.scan_objects = zs_shrinker_scan; + pool->shrinker.count_objects = zs_shrinker_count; + pool->shrinker.batch = 0; + pool->shrinker.seeks = DEFAULT_SEEKS; + + return register_shrinker(&pool->shrinker); +} + /** * zs_create_pool - Creates an allocation pool to work from. * @flags: allocation flags used to allocate pool metadata @@ -1857,6 +1941,12 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags) if (zs_pool_stat_create(name, pool)) goto err; + /* + * Not critical, we still can use the pool + * and user can trigger compaction manually. 
+ */ + if (zs_register_shrinker(pool) == 0) + pool->shrinker_enabled = true; return pool; err: @@ -1869,6 +1959,7 @@ void zs_destroy_pool(struct zs_pool *pool) { int i; + zs_unregister_shrinker(pool); zs_pool_stat_destroy(pool); for (i = 0; i < zs_size_classes; i++) { diff --git a/mm/zswap.c b/mm/zswap.c index 2d5727bae..4043df7c6 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -80,85 +80,54 @@ static u64 zswap_duplicate_entry; static bool zswap_enabled; module_param_named(enabled, zswap_enabled, bool, 0644); -/* Compressor to be used by zswap (fixed at boot for now) */ +/* Crypto compressor to use */ #define ZSWAP_COMPRESSOR_DEFAULT "lzo" -static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; -module_param_named(compressor, zswap_compressor, charp, 0444); - -/* The maximum percentage of memory that the compressed pool can occupy */ -static unsigned int zswap_max_pool_percent = 20; -module_param_named(max_pool_percent, - zswap_max_pool_percent, uint, 0644); +static char zswap_compressor[CRYPTO_MAX_ALG_NAME] = ZSWAP_COMPRESSOR_DEFAULT; +static struct kparam_string zswap_compressor_kparam = { + .string = zswap_compressor, + .maxlen = sizeof(zswap_compressor), +}; +static int zswap_compressor_param_set(const char *, + const struct kernel_param *); +static struct kernel_param_ops zswap_compressor_param_ops = { + .set = zswap_compressor_param_set, + .get = param_get_string, +}; +module_param_cb(compressor, &zswap_compressor_param_ops, + &zswap_compressor_kparam, 0644); -/* Compressed storage to use */ +/* Compressed storage zpool to use */ #define ZSWAP_ZPOOL_DEFAULT "zbud" -static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT; -module_param_named(zpool, zswap_zpool_type, charp, 0444); +static char zswap_zpool_type[32 /* arbitrary */] = ZSWAP_ZPOOL_DEFAULT; +static struct kparam_string zswap_zpool_kparam = { + .string = zswap_zpool_type, + .maxlen = sizeof(zswap_zpool_type), +}; +static int zswap_zpool_param_set(const char *, const struct kernel_param *); +static struct kernel_param_ops zswap_zpool_param_ops = { + .set = zswap_zpool_param_set, + .get = param_get_string, +}; +module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_kparam, 0644); -/* zpool is shared by all of zswap backend */ -static struct zpool *zswap_pool; +/* The maximum percentage of memory that the compressed pool can occupy */ +static unsigned int zswap_max_pool_percent = 20; +module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644); /********************************* -* compression functions +* data structures **********************************/ -/* per-cpu compression transforms */ -static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms; -enum comp_op { - ZSWAP_COMPOP_COMPRESS, - ZSWAP_COMPOP_DECOMPRESS +struct zswap_pool { + struct zpool *zpool; + struct crypto_comp * __percpu *tfm; + struct kref kref; + struct list_head list; + struct rcu_head rcu_head; + struct notifier_block notifier; + char tfm_name[CRYPTO_MAX_ALG_NAME]; }; -static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_comp *tfm; - int ret; - - tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu()); - switch (op) { - case ZSWAP_COMPOP_COMPRESS: - ret = crypto_comp_compress(tfm, src, slen, dst, dlen); - break; - case ZSWAP_COMPOP_DECOMPRESS: - ret = crypto_comp_decompress(tfm, src, slen, dst, dlen); - break; - default: - ret = -EINVAL; - } - - put_cpu(); - return ret; -} - -static int __init zswap_comp_init(void) -{ - if (!crypto_has_comp(zswap_compressor, 0, 0)) { - 
pr_info("%s compressor not available\n", zswap_compressor); - /* fall back to default compressor */ - zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; - if (!crypto_has_comp(zswap_compressor, 0, 0)) - /* can't even load the default compressor */ - return -ENODEV; - } - pr_info("using %s compressor\n", zswap_compressor); - - /* alloc percpu transforms */ - zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *); - if (!zswap_comp_pcpu_tfms) - return -ENOMEM; - return 0; -} - -static void __init zswap_comp_exit(void) -{ - /* free percpu transforms */ - free_percpu(zswap_comp_pcpu_tfms); -} - -/********************************* -* data structures -**********************************/ /* * struct zswap_entry * @@ -166,22 +135,24 @@ static void __init zswap_comp_exit(void) * page within zswap. * * rbnode - links the entry into red-black tree for the appropriate swap type + * offset - the swap offset for the entry. Index into the red-black tree. * refcount - the number of outstanding reference to the entry. This is needed * to protect against premature freeing of the entry by code * concurrent calls to load, invalidate, and writeback. The lock * for the zswap_tree structure that contains the entry must * be held while changing the refcount. Since the lock must * be held, there is no reason to also make refcount atomic. - * offset - the swap offset for the entry. Index into the red-black tree. - * handle - zpool allocation handle that stores the compressed page data * length - the length in bytes of the compressed page data. Needed during * decompression + * pool - the zswap_pool the entry's data is in + * handle - zpool allocation handle that stores the compressed page data */ struct zswap_entry { struct rb_node rbnode; pgoff_t offset; int refcount; unsigned int length; + struct zswap_pool *pool; unsigned long handle; }; @@ -201,6 +172,51 @@ struct zswap_tree { static struct zswap_tree *zswap_trees[MAX_SWAPFILES]; +/* RCU-protected iteration */ +static LIST_HEAD(zswap_pools); +/* protects zswap_pools list modification */ +static DEFINE_SPINLOCK(zswap_pools_lock); + +/* used by param callback function */ +static bool zswap_init_started; + +/********************************* +* helpers and fwd declarations +**********************************/ + +#define zswap_pool_debug(msg, p) \ + pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ + zpool_get_type((p)->zpool)) + +static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); +static int zswap_pool_get(struct zswap_pool *pool); +static void zswap_pool_put(struct zswap_pool *pool); + +static const struct zpool_ops zswap_zpool_ops = { + .evict = zswap_writeback_entry +}; + +static bool zswap_is_full(void) +{ + return totalram_pages * zswap_max_pool_percent / 100 < + DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); +} + +static void zswap_update_total_size(void) +{ + struct zswap_pool *pool; + u64 total = 0; + + rcu_read_lock(); + + list_for_each_entry_rcu(pool, &zswap_pools, list) + total += zpool_get_total_size(pool->zpool); + + rcu_read_unlock(); + + zswap_pool_total_size = total; +} + /********************************* * zswap entry functions **********************************/ @@ -294,10 +310,11 @@ static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) */ static void zswap_free_entry(struct zswap_entry *entry) { - zpool_free(zswap_pool, entry->handle); + zpool_free(entry->pool->zpool, entry->handle); + zswap_pool_put(entry->pool); zswap_entry_cache_free(entry); atomic_dec(&zswap_stored_pages); - zswap_pool_total_size 
= zpool_get_total_size(zswap_pool); + zswap_update_total_size(); } /* caller must hold the tree lock */ @@ -339,35 +356,21 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root, **********************************/ static DEFINE_PER_CPU(u8 *, zswap_dstmem); -static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu) +static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu) { - struct crypto_comp *tfm; u8 *dst; switch (action) { case CPU_UP_PREPARE: - tfm = crypto_alloc_comp(zswap_compressor, 0, 0); - if (IS_ERR(tfm)) { - pr_err("can't allocate compressor transform\n"); - return NOTIFY_BAD; - } - *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm; dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); if (!dst) { pr_err("can't allocate compressor buffer\n"); - crypto_free_comp(tfm); - *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; return NOTIFY_BAD; } per_cpu(zswap_dstmem, cpu) = dst; break; case CPU_DEAD: case CPU_UP_CANCELED: - tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu); - if (tfm) { - crypto_free_comp(tfm); - *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; - } dst = per_cpu(zswap_dstmem, cpu); kfree(dst); per_cpu(zswap_dstmem, cpu) = NULL; @@ -378,43 +381,398 @@ static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu) return NOTIFY_OK; } -static int zswap_cpu_notifier(struct notifier_block *nb, - unsigned long action, void *pcpu) +static int zswap_cpu_dstmem_notifier(struct notifier_block *nb, + unsigned long action, void *pcpu) { - unsigned long cpu = (unsigned long)pcpu; - return __zswap_cpu_notifier(action, cpu); + return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu); } -static struct notifier_block zswap_cpu_notifier_block = { - .notifier_call = zswap_cpu_notifier +static struct notifier_block zswap_dstmem_notifier = { + .notifier_call = zswap_cpu_dstmem_notifier, }; -static int __init zswap_cpu_init(void) +static int __init zswap_cpu_dstmem_init(void) { unsigned long cpu; cpu_notifier_register_begin(); for_each_online_cpu(cpu) - if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK) + if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) == + NOTIFY_BAD) goto cleanup; - __register_cpu_notifier(&zswap_cpu_notifier_block); + __register_cpu_notifier(&zswap_dstmem_notifier); cpu_notifier_register_done(); return 0; cleanup: for_each_online_cpu(cpu) - __zswap_cpu_notifier(CPU_UP_CANCELED, cpu); + __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu); cpu_notifier_register_done(); return -ENOMEM; } +static void zswap_cpu_dstmem_destroy(void) +{ + unsigned long cpu; + + cpu_notifier_register_begin(); + for_each_online_cpu(cpu) + __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu); + __unregister_cpu_notifier(&zswap_dstmem_notifier); + cpu_notifier_register_done(); +} + +static int __zswap_cpu_comp_notifier(struct zswap_pool *pool, + unsigned long action, unsigned long cpu) +{ + struct crypto_comp *tfm; + + switch (action) { + case CPU_UP_PREPARE: + if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu))) + break; + tfm = crypto_alloc_comp(pool->tfm_name, 0, 0); + if (IS_ERR_OR_NULL(tfm)) { + pr_err("could not alloc crypto comp %s : %ld\n", + pool->tfm_name, PTR_ERR(tfm)); + return NOTIFY_BAD; + } + *per_cpu_ptr(pool->tfm, cpu) = tfm; + break; + case CPU_DEAD: + case CPU_UP_CANCELED: + tfm = *per_cpu_ptr(pool->tfm, cpu); + if (!IS_ERR_OR_NULL(tfm)) + crypto_free_comp(tfm); + *per_cpu_ptr(pool->tfm, cpu) = NULL; + break; + default: + break; + } + return NOTIFY_OK; +} + +static int zswap_cpu_comp_notifier(struct 
notifier_block *nb, + unsigned long action, void *pcpu) +{ + unsigned long cpu = (unsigned long)pcpu; + struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier); + + return __zswap_cpu_comp_notifier(pool, action, cpu); +} + +static int zswap_cpu_comp_init(struct zswap_pool *pool) +{ + unsigned long cpu; + + memset(&pool->notifier, 0, sizeof(pool->notifier)); + pool->notifier.notifier_call = zswap_cpu_comp_notifier; + + cpu_notifier_register_begin(); + for_each_online_cpu(cpu) + if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) == + NOTIFY_BAD) + goto cleanup; + __register_cpu_notifier(&pool->notifier); + cpu_notifier_register_done(); + return 0; + +cleanup: + for_each_online_cpu(cpu) + __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu); + cpu_notifier_register_done(); + return -ENOMEM; +} + +static void zswap_cpu_comp_destroy(struct zswap_pool *pool) +{ + unsigned long cpu; + + cpu_notifier_register_begin(); + for_each_online_cpu(cpu) + __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu); + __unregister_cpu_notifier(&pool->notifier); + cpu_notifier_register_done(); +} + /********************************* -* helpers +* pool functions **********************************/ -static bool zswap_is_full(void) + +static struct zswap_pool *__zswap_pool_current(void) { - return totalram_pages * zswap_max_pool_percent / 100 < - DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); + struct zswap_pool *pool; + + pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list); + WARN_ON(!pool); + + return pool; +} + +static struct zswap_pool *zswap_pool_current(void) +{ + assert_spin_locked(&zswap_pools_lock); + + return __zswap_pool_current(); +} + +static struct zswap_pool *zswap_pool_current_get(void) +{ + struct zswap_pool *pool; + + rcu_read_lock(); + + pool = __zswap_pool_current(); + if (!pool || !zswap_pool_get(pool)) + pool = NULL; + + rcu_read_unlock(); + + return pool; +} + +static struct zswap_pool *zswap_pool_last_get(void) +{ + struct zswap_pool *pool, *last = NULL; + + rcu_read_lock(); + + list_for_each_entry_rcu(pool, &zswap_pools, list) + last = pool; + if (!WARN_ON(!last) && !zswap_pool_get(last)) + last = NULL; + + rcu_read_unlock(); + + return last; +} + +static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) +{ + struct zswap_pool *pool; + + assert_spin_locked(&zswap_pools_lock); + + list_for_each_entry_rcu(pool, &zswap_pools, list) { + if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) + continue; + if (strncmp(zpool_get_type(pool->zpool), type, + sizeof(zswap_zpool_type))) + continue; + /* if we can't get it, it's about to be destroyed */ + if (!zswap_pool_get(pool)) + continue; + return pool; + } + + return NULL; +} + +static struct zswap_pool *zswap_pool_create(char *type, char *compressor) +{ + struct zswap_pool *pool; + gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + pr_err("pool alloc failed\n"); + return NULL; + } + + pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops); + if (!pool->zpool) { + pr_err("%s zpool not available\n", type); + goto error; + } + pr_debug("using %s zpool\n", zpool_get_type(pool->zpool)); + + strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); + pool->tfm = alloc_percpu(struct crypto_comp *); + if (!pool->tfm) { + pr_err("percpu alloc failed\n"); + goto error; + } + + if (zswap_cpu_comp_init(pool)) + goto error; + pr_debug("using %s compressor\n", pool->tfm_name); + + /* being the current pool takes 1 ref; this func 
expects the + * caller to always add the new pool as the current pool + */ + kref_init(&pool->kref); + INIT_LIST_HEAD(&pool->list); + + zswap_pool_debug("created", pool); + + return pool; + +error: + free_percpu(pool->tfm); + if (pool->zpool) + zpool_destroy_pool(pool->zpool); + kfree(pool); + return NULL; +} + +static struct zswap_pool *__zswap_pool_create_fallback(void) +{ + if (!crypto_has_comp(zswap_compressor, 0, 0)) { + pr_err("compressor %s not available, using default %s\n", + zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT); + strncpy(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT, + sizeof(zswap_compressor)); + } + if (!zpool_has_pool(zswap_zpool_type)) { + pr_err("zpool %s not available, using default %s\n", + zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT); + strncpy(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT, + sizeof(zswap_zpool_type)); + } + + return zswap_pool_create(zswap_zpool_type, zswap_compressor); +} + +static void zswap_pool_destroy(struct zswap_pool *pool) +{ + zswap_pool_debug("destroying", pool); + + zswap_cpu_comp_destroy(pool); + free_percpu(pool->tfm); + zpool_destroy_pool(pool->zpool); + kfree(pool); +} + +static int __must_check zswap_pool_get(struct zswap_pool *pool) +{ + return kref_get_unless_zero(&pool->kref); +} + +static void __zswap_pool_release(struct rcu_head *head) +{ + struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head); + + /* nobody should have been able to get a kref... */ + WARN_ON(kref_get_unless_zero(&pool->kref)); + + /* pool is now off zswap_pools list and has no references. */ + zswap_pool_destroy(pool); +} + +static void __zswap_pool_empty(struct kref *kref) +{ + struct zswap_pool *pool; + + pool = container_of(kref, typeof(*pool), kref); + + spin_lock(&zswap_pools_lock); + + WARN_ON(pool == zswap_pool_current()); + + list_del_rcu(&pool->list); + call_rcu(&pool->rcu_head, __zswap_pool_release); + + spin_unlock(&zswap_pools_lock); +} + +static void zswap_pool_put(struct zswap_pool *pool) +{ + kref_put(&pool->kref, __zswap_pool_empty); +} + +/********************************* +* param callbacks +**********************************/ + +static int __zswap_param_set(const char *val, const struct kernel_param *kp, + char *type, char *compressor) +{ + struct zswap_pool *pool, *put_pool = NULL; + char str[kp->str->maxlen], *s; + int ret; + + /* + * kp is either zswap_zpool_kparam or zswap_compressor_kparam, defined + * at the top of this file, so maxlen is CRYPTO_MAX_ALG_NAME (64) or + * 32 (arbitrary). + */ + strlcpy(str, val, kp->str->maxlen); + s = strim(str); + + /* if this is load-time (pre-init) param setting, + * don't create a pool; that's done during init. 
+ */ + if (!zswap_init_started) + return param_set_copystring(s, kp); + + /* no change required */ + if (!strncmp(kp->str->string, s, kp->str->maxlen)) + return 0; + + if (!type) { + type = s; + if (!zpool_has_pool(type)) { + pr_err("zpool %s not available\n", type); + return -ENOENT; + } + } else if (!compressor) { + compressor = s; + if (!crypto_has_comp(compressor, 0, 0)) { + pr_err("compressor %s not available\n", compressor); + return -ENOENT; + } + } + + spin_lock(&zswap_pools_lock); + + pool = zswap_pool_find_get(type, compressor); + if (pool) { + zswap_pool_debug("using existing", pool); + list_del_rcu(&pool->list); + } else { + spin_unlock(&zswap_pools_lock); + pool = zswap_pool_create(type, compressor); + spin_lock(&zswap_pools_lock); + } + + if (pool) + ret = param_set_copystring(s, kp); + else + ret = -EINVAL; + + if (!ret) { + put_pool = zswap_pool_current(); + list_add_rcu(&pool->list, &zswap_pools); + } else if (pool) { + /* add the possibly pre-existing pool to the end of the pools + * list; if it's new (and empty) then it'll be removed and + * destroyed by the put after we drop the lock + */ + list_add_tail_rcu(&pool->list, &zswap_pools); + put_pool = pool; + } + + spin_unlock(&zswap_pools_lock); + + /* drop the ref from either the old current pool, + * or the new pool we failed to add + */ + if (put_pool) + zswap_pool_put(put_pool); + + return ret; +} + +static int zswap_compressor_param_set(const char *val, + const struct kernel_param *kp) +{ + return __zswap_param_set(val, kp, zswap_zpool_type, NULL); +} + +static int zswap_zpool_param_set(const char *val, + const struct kernel_param *kp) +{ + return __zswap_param_set(val, kp, NULL, zswap_compressor); } /********************************* @@ -446,75 +804,14 @@ enum zswap_get_swap_ret { static int zswap_get_swap_cache_page(swp_entry_t entry, struct page **retpage) { - struct page *found_page, *new_page = NULL; - struct address_space *swapper_space = swap_address_space(entry); - int err; - - *retpage = NULL; - do { - /* - * First check the swap cache. Since this is normally - * called after lookup_swap_cache() failed, re-calling - * that would confuse statistics. - */ - found_page = find_get_page(swapper_space, entry.val); - if (found_page) - break; - - /* - * Get a new page to read into from swap. - */ - if (!new_page) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) - break; /* Out of memory */ - } - - /* - * call radix_tree_preload() while we can wait. - */ - err = radix_tree_preload(GFP_KERNEL); - if (err) - break; - - /* - * Swap entry may have been freed since our caller observed it. - */ - err = swapcache_prepare(entry); - if (err == -EEXIST) { /* seems racy */ - radix_tree_preload_end(); - continue; - } - if (err) { /* swp entry is obsolete ? */ - radix_tree_preload_end(); - break; - } + bool page_was_allocated; - /* May fail (-ENOMEM) if radix-tree node allocation failed. */ - __set_page_locked(new_page); - SetPageSwapBacked(new_page); - err = __add_to_swap_cache(new_page, entry); - if (likely(!err)) { - radix_tree_preload_end(); - lru_cache_add_anon(new_page); - *retpage = new_page; - return ZSWAP_SWAPCACHE_NEW; - } - radix_tree_preload_end(); - ClearPageSwapBacked(new_page); - __clear_page_locked(new_page); - /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. 
- */ - swapcache_free(entry); - } while (err != -ENOMEM); - - if (new_page) - page_cache_release(new_page); - if (!found_page) + *retpage = __read_swap_cache_async(entry, GFP_KERNEL, + NULL, 0, &page_was_allocated); + if (page_was_allocated) + return ZSWAP_SWAPCACHE_NEW; + if (!*retpage) return ZSWAP_SWAPCACHE_FAIL; - *retpage = found_page; return ZSWAP_SWAPCACHE_EXIST; } @@ -538,6 +835,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) pgoff_t offset; struct zswap_entry *entry; struct page *page; + struct crypto_comp *tfm; u8 *src, *dst; unsigned int dlen; int ret; @@ -578,13 +876,15 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) case ZSWAP_SWAPCACHE_NEW: /* page is locked */ /* decompress */ dlen = PAGE_SIZE; - src = (u8 *)zpool_map_handle(zswap_pool, entry->handle, + src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO) + sizeof(struct zswap_header); dst = kmap_atomic(page); - ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, - entry->length, dst, &dlen); + tfm = *get_cpu_ptr(entry->pool->tfm); + ret = crypto_comp_decompress(tfm, src, entry->length, + dst, &dlen); + put_cpu_ptr(entry->pool->tfm); kunmap_atomic(dst); - zpool_unmap_handle(zswap_pool, entry->handle); + zpool_unmap_handle(entry->pool->zpool, entry->handle); BUG_ON(ret); BUG_ON(dlen != PAGE_SIZE); @@ -633,6 +933,22 @@ end: return ret; } +static int zswap_shrink(void) +{ + struct zswap_pool *pool; + int ret; + + pool = zswap_pool_last_get(); + if (!pool) + return -ENOENT; + + ret = zpool_shrink(pool->zpool, 1, NULL); + + zswap_pool_put(pool); + + return ret; +} + /********************************* * frontswap hooks **********************************/ @@ -642,6 +958,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, { struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry, *dupentry; + struct crypto_comp *tfm; int ret; unsigned int dlen = PAGE_SIZE, len; unsigned long handle; @@ -657,7 +974,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, /* reclaim space if needed */ if (zswap_is_full()) { zswap_pool_limit_hit++; - if (zpool_shrink(zswap_pool, 1, NULL)) { + if (zswap_shrink()) { zswap_reject_reclaim_fail++; ret = -ENOMEM; goto reject; @@ -672,33 +989,42 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, goto reject; } + /* if entry is successfully added, it keeps the reference */ + entry->pool = zswap_pool_current_get(); + if (!entry->pool) { + ret = -EINVAL; + goto freepage; + } + /* compress */ dst = get_cpu_var(zswap_dstmem); + tfm = *get_cpu_ptr(entry->pool->tfm); src = kmap_atomic(page); - ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen); + ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen); kunmap_atomic(src); + put_cpu_ptr(entry->pool->tfm); if (ret) { ret = -EINVAL; - goto freepage; + goto put_dstmem; } /* store */ len = dlen + sizeof(struct zswap_header); - ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN, - &handle); + ret = zpool_malloc(entry->pool->zpool, len, + __GFP_NORETRY | __GFP_NOWARN, &handle); if (ret == -ENOSPC) { zswap_reject_compress_poor++; - goto freepage; + goto put_dstmem; } if (ret) { zswap_reject_alloc_fail++; - goto freepage; + goto put_dstmem; } - zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW); + zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW); zhdr->swpentry = swp_entry(type, offset); buf = (u8 *)(zhdr + 1); memcpy(buf, dst, dlen); - zpool_unmap_handle(zswap_pool, 
handle); + zpool_unmap_handle(entry->pool->zpool, handle); put_cpu_var(zswap_dstmem); /* populate entry */ @@ -721,12 +1047,14 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, /* update stats */ atomic_inc(&zswap_stored_pages); - zswap_pool_total_size = zpool_get_total_size(zswap_pool); + zswap_update_total_size(); return 0; -freepage: +put_dstmem: put_cpu_var(zswap_dstmem); + zswap_pool_put(entry->pool); +freepage: zswap_entry_cache_free(entry); reject: return ret; @@ -741,6 +1069,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, { struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry; + struct crypto_comp *tfm; u8 *src, *dst; unsigned int dlen; int ret; @@ -757,13 +1086,14 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, /* decompress */ dlen = PAGE_SIZE; - src = (u8 *)zpool_map_handle(zswap_pool, entry->handle, + src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO) + sizeof(struct zswap_header); dst = kmap_atomic(page); - ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length, - dst, &dlen); + tfm = *get_cpu_ptr(entry->pool->tfm); + ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen); + put_cpu_ptr(entry->pool->tfm); kunmap_atomic(dst); - zpool_unmap_handle(zswap_pool, entry->handle); + zpool_unmap_handle(entry->pool->zpool, entry->handle); BUG_ON(ret); spin_lock(&tree->lock); @@ -816,10 +1146,6 @@ static void zswap_frontswap_invalidate_area(unsigned type) zswap_trees[type] = NULL; } -static struct zpool_ops zswap_zpool_ops = { - .evict = zswap_writeback_entry -}; - static void zswap_frontswap_init(unsigned type) { struct zswap_tree *tree; @@ -900,49 +1226,40 @@ static void __exit zswap_debugfs_exit(void) { } **********************************/ static int __init init_zswap(void) { - gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN; + struct zswap_pool *pool; - pr_info("loading zswap\n"); - - zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp, - &zswap_zpool_ops); - if (!zswap_pool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) { - pr_info("%s zpool not available\n", zswap_zpool_type); - zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT; - zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp, - &zswap_zpool_ops); - } - if (!zswap_pool) { - pr_err("%s zpool not available\n", zswap_zpool_type); - pr_err("zpool creation failed\n"); - goto error; - } - pr_info("using %s pool\n", zswap_zpool_type); + zswap_init_started = true; if (zswap_entry_cache_create()) { pr_err("entry cache creation failed\n"); - goto cachefail; + goto cache_fail; } - if (zswap_comp_init()) { - pr_err("compressor initialization failed\n"); - goto compfail; + + if (zswap_cpu_dstmem_init()) { + pr_err("dstmem alloc failed\n"); + goto dstmem_fail; } - if (zswap_cpu_init()) { - pr_err("per-cpu initialization failed\n"); - goto pcpufail; + + pool = __zswap_pool_create_fallback(); + if (!pool) { + pr_err("pool creation failed\n"); + goto pool_fail; } + pr_info("loaded using pool %s/%s\n", pool->tfm_name, + zpool_get_type(pool->zpool)); + + list_add(&pool->list, &zswap_pools); frontswap_register_ops(&zswap_frontswap_ops); if (zswap_debugfs_init()) pr_warn("debugfs initialization failed\n"); return 0; -pcpufail: - zswap_comp_exit(); -compfail: + +pool_fail: + zswap_cpu_dstmem_destroy(); +dstmem_fail: zswap_entry_cache_destroy(); -cachefail: - zpool_destroy_pool(zswap_pool); -error: +cache_fail: return -ENOMEM; } /* must be late so crypto has time to come up */ |
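
The zsmalloc hunks above gate compaction on zs_can_compact() and replace the per-call migration counter with a pool-wide pages_compacted statistic; the new shrinker's count_objects callback sums the same estimate over every size class, and scan_objects reports the delta in pages_compacted (or SHRINK_STOP when nothing was freed). As a rough illustration only, the estimate can be modeled in user space; the class parameters below (4 objects per zspage, 2 pages per zspage) are hypothetical and not taken from the patch:

#include <stdio.h>

/*
 * Stand-alone model of the zs_can_compact() arithmetic: allocated-but-unused
 * objects, divided by the number of objects one zspage holds, give the number
 * of zspages that could be emptied; each zspage spans pages_per_zspage pages.
 */
static unsigned long can_compact(unsigned long obj_allocated,
				 unsigned long obj_used,
				 unsigned long maxobj_per_zspage,
				 unsigned long pages_per_zspage)
{
	unsigned long obj_wasted = obj_allocated - obj_used;

	obj_wasted /= maxobj_per_zspage;	/* whole zspages worth of waste */
	return obj_wasted * pages_per_zspage;	/* pages compaction could free */
}

int main(void)
{
	/* hypothetical size class: (100 - 62) / 4 = 9 zspages -> 18 pages */
	printf("estimated freeable pages: %lu\n", can_compact(100, 62, 4, 2));
	return 0;
}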
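
On the zswap side, the single global zpool is replaced by a list of reference-counted zswap_pool structures, so zswap_pool_total_size is now refreshed by summing zpool_get_total_size() over every pool (zswap_update_total_size()); the zswap_is_full() threshold itself is unchanged. A minimal sketch of that threshold arithmetic, using an invented 1 GiB system and assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* mirrors zswap_is_full(): the compressed pool may occupy at most
 * max_pool_percent of RAM, compared in units of pages. */
static int pool_is_full(unsigned long totalram_pages,
			unsigned int max_pool_percent,
			unsigned long long pool_total_bytes)
{
	return totalram_pages * max_pool_percent / 100 <
	       DIV_ROUND_UP(pool_total_bytes, PAGE_SIZE);
}

int main(void)
{
	unsigned long totalram = 262144;		/* 1 GiB of 4 KiB pages */
	unsigned long long total = 200ULL << 20;	/* 200 MiB summed over all pools */

	/* cap is 52428 pages (~204 MiB); 200 MiB is 51200 pages -> not full */
	printf("zswap full: %s\n",
	       pool_is_full(totalram, 20, total) ? "yes" : "no");
	return 0;
}

When the limit is hit, zswap_frontswap_store() now goes through zswap_shrink(), which takes a reference on the last pool on the list and asks its zpool to evict a single entry before retrying the store.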