summary refs log tree commit diff
path: root/mm/vmscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 71b1c2994..f406a4765 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1475,7 +1475,7 @@ static int too_many_isolated(struct zone *zone, int file,
{
unsigned long inactive, isolated;
- if (current_is_kswapd())
+ if (current_is_kswapd() || sc->hibernation_mode)
return 0;
if (!sane_reclaim(sc))
@@ -2345,6 +2345,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
unsigned long pages_for_compaction;
unsigned long inactive_lru_pages;
+ if (nr_reclaimed && nr_scanned && sc->nr_to_reclaim >= sc->nr_reclaimed)
+ return true;
+
/* If not in reclaim/compaction mode, stop */
if (!in_reclaim_compaction(sc))
return false;
@@ -2659,6 +2662,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
unsigned long total_scanned = 0;
unsigned long writeback_threshold;
bool zones_reclaimable;
+
+#ifdef CONFIG_FREEZER
+ if (unlikely(pm_freezing && !sc->hibernation_mode))
+ return 0;
+#endif
+
retry:
delayacct_freepages_start();
@@ -3540,6 +3549,11 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
if (!populated_zone(zone))
return;
+#ifdef CONFIG_FREEZER
+ if (pm_freezing)
+ return;
+#endif
+
if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
return;
pgdat = zone->zone_pgdat;
@@ -3565,7 +3579,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
-unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+unsigned long shrink_memory_mask(unsigned long nr_to_reclaim, gfp_t mask)
{
struct reclaim_state reclaim_state;
struct scan_control sc = {
@@ -3594,6 +3608,11 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
return nr_reclaimed;
}
+
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+{
+ return shrink_memory_mask(nr_to_reclaim, GFP_HIGHUSER_MOVABLE);
+}
#endif /* CONFIG_HIBERNATION */
/* It's optimal to keep kswapds on the same CPUs as their memory, but