Diffstat (limited to 'kernel/power')
-rw-r--r--   kernel/power/process.c   12
-rw-r--r--   kernel/power/swap.c      18
2 files changed, 30 insertions, 0 deletions
diff --git a/kernel/power/process.c b/kernel/power/process.c
index df058bed5..0c2ee9761 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -146,6 +146,18 @@ int freeze_processes(void)
 	if (!error && !oom_killer_disable())
 		error = -EBUSY;
 
+	/*
+	 * There is a hard to fix race between oom_reaper kernel thread
+	 * and oom_killer_disable. oom_reaper calls exit_oom_victim
+	 * before the victim reaches exit_mm so try to freeze all the tasks
+	 * again and catch such a left over task.
+	 */
+	if (!error) {
+		pr_info("Double checking all user space processes after OOM killer disable... ");
+		error = try_to_freeze_tasks(true);
+		pr_cont("\n");
+	}
+
 	if (error)
 		thaw_processes();
 	return error;
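
The hunk above retries the freezer once more after oom_killer_disable(). Below is a minimal user-space mock of that control flow, for illustration only: every helper is a stub and the function name freeze_processes_tail() is made up; only the retry logic mirrors the patch.

/* Hypothetical user-space mock of the freeze_processes() tail after this
 * patch.  All helpers are stubs; only the retry-after-oom_killer_disable()
 * control flow follows the hunk above. */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static bool oom_killer_disable(void) { return true; }                         /* stub */
static int try_to_freeze_tasks(bool user_only) { (void)user_only; return 0; } /* stub */
static void thaw_processes(void) { }                                          /* stub */

static int freeze_processes_tail(int error)
{
	if (!error && !oom_killer_disable())
		error = -EBUSY;

	/*
	 * The oom_reaper may call exit_oom_victim() before the victim
	 * reaches exit_mm(), so freeze user space once more to catch
	 * any task that slipped through.
	 */
	if (!error) {
		printf("Double checking all user space processes after OOM killer disable... ");
		error = try_to_freeze_tasks(true);
		printf("\n");
	}

	if (error)
		thaw_processes();
	return error;
}

int main(void)
{
	return freeze_processes_tail(0) ? 1 : 0;
}
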
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 12cd989da..160e10066 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -37,6 +37,14 @@
 #define HIBERNATE_SIG	"S1SUSPEND"
 
 /*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages cleaning before they
+ * can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
+/*
  * The swap map is a data structure used for keeping track of each page
  * written to a swap partition. It consists of many swap_map_page
  * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
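
The comment above is the rationale for both flags: on architectures whose instruction cache is not coherent with the data cache, a page that has just been (re)written cannot safely be executed until its I-cache lines are invalidated. A rough user-space sketch of that per-page cleaning follows; __builtin___clear_cache() stands in for the kernel's flush_icache_range(), and PAGE_SIZE plus the helper name are assumptions made for the example.

/* User-space sketch of "clean a page before it may be executed".
 * __builtin___clear_cache() plays the role of the kernel's
 * flush_icache_range(); PAGE_SIZE and the helper name are assumptions. */
#include <stdint.h>

#define PAGE_SIZE 4096u

static void clean_page_for_execution(void *page)
{
	char *start = page;

	/* A no-op on x86, but required on e.g. arm64 before jumping into 'page'. */
	__builtin___clear_cache(start, start + PAGE_SIZE);
}

int main(void)
{
	static unsigned char buf[PAGE_SIZE];

	clean_page_for_execution(buf);
	return 0;
}
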
@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
 
 	if (bio_data_dir(bio) == WRITE)
 		put_page(page);
+	else if (clean_pages_on_read)
+		flush_icache_range((unsigned long)page_address(page),
+				   (unsigned long)page_address(page) + PAGE_SIZE);
 
 	if (bio->bi_error && !hb->error)
 		hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
 
 	hib_init_batch(&hb);
 
+	clean_pages_on_read = true;
 	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
 		nr_to_read);
 	m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
 		d->unc_len = LZO_UNC_SIZE;
 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
 		                               d->unc, &d->unc_len);
+		if (clean_pages_on_decompress)
+			flush_icache_range((unsigned long)d->unc,
+					   (unsigned long)d->unc + d->unc_len);
+
 		atomic_set(&d->stop, 1);
 		wake_up(&d->done);
 	}
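
In the hunk above the flush runs inside the decompressor thread, before d->stop is set and the waiter is woken, so each LZO chunk has been cleaned by the time the consumer sees it as done. A rough pthread analogue of that ordering is sketched below; the decompression and the cache maintenance are stubbed, and all names are hypothetical.

/* Hypothetical pthread analogue of the worker pattern above: finish all
 * work on the output buffer, including cache maintenance, before
 * publishing completion to the waiter. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dec_data {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	bool            stop;      /* worker sets this when the buffer is ready */
	unsigned char   unc[4096]; /* "decompressed" output (stub contents)     */
	size_t          unc_len;
};

static void *decompress_threadfn(void *arg)
{
	struct dec_data *d = arg;

	/* Stand-in for lzo1x_decompress_safe(): just produce some bytes. */
	memset(d->unc, 0xAA, sizeof(d->unc));
	d->unc_len = sizeof(d->unc);

	/* Stand-in for flush_icache_range() over the bytes just produced. */
	__builtin___clear_cache((char *)d->unc, (char *)d->unc + d->unc_len);

	/* Only now publish completion, mirroring atomic_set() + wake_up(). */
	pthread_mutex_lock(&d->lock);
	d->stop = true;
	pthread_cond_signal(&d->done);
	pthread_mutex_unlock(&d->lock);
	return NULL;
}

int main(void)
{
	struct dec_data d = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, decompress_threadfn, &d);

	pthread_mutex_lock(&d.lock);
	while (!d.stop)
		pthread_cond_wait(&d.done, &d.lock);
	pthread_mutex_unlock(&d.lock);

	pthread_join(t, NULL);
	printf("worker produced %zu bytes\n", d.unc_len);
	return 0;
}
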
@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	memset(crc, 0, offsetof(struct crc_data, go));
 
+	clean_pages_on_decompress = true;
+
 	/*
 	 * Start the decompression threads.
 	 */