Diffstat (limited to 'kernel/power')
-rw-r--r--   kernel/power/Kconfig              | 10
-rw-r--r--   kernel/power/suspend.c            |  2
-rw-r--r--   kernel/power/swap.c               | 12
-rw-r--r--   kernel/power/tuxonice_bio_core.c  | 11
-rw-r--r--   kernel/power/wakelock.c           | 18
5 files changed, 36 insertions, 17 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 89a46f3ff..9e2ee0cb1 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,16 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config SUSPEND_SKIP_SYNC
+	bool "Skip kernel's sys_sync() on suspend to RAM/standby"
+	depends on SUSPEND
+	depends on EXPERT
+	help
+	  Skip the kernel sys_sync() before freezing user processes.
+	  Some systems prefer not to pay this cost on every invocation
+	  of suspend, or they are content with invoking sync() from
+	  user-space before invoking suspend. Say Y if that's your case.
+
 config HIBERNATE_CALLBACKS
 	bool
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 53266b729..7e4cda4a8 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -484,11 +484,13 @@ static int enter_state(suspend_state_t state)
 	if (state == PM_SUSPEND_FREEZE)
 		freeze_begin();
 
+#ifndef CONFIG_SUSPEND_SKIP_SYNC
 	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
 	printk("done.\n");
 	trace_suspend_resume(TPS("sync_filesystems"), 0, false);
+#endif
 
 	pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
 	error = suspend_prepare(state);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 2f30ca91e..b2066fb5b 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -227,27 +227,23 @@ static void hib_init_batch(struct hib_bio_batch *hb)
 	hb->error = 0;
 }
 
-static void hib_end_io(struct bio *bio, int error)
+static void hib_end_io(struct bio *bio)
 {
 	struct hib_bio_batch *hb = bio->bi_private;
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (!uptodate || error) {
+	if (bio->bi_error) {
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 				imajor(bio->bi_bdev->bd_inode),
 				iminor(bio->bi_bdev->bd_inode),
 				(unsigned long long)bio->bi_iter.bi_sector);
-
-		if (!error)
-			error = -EIO;
 	}
 
 	if (bio_data_dir(bio) == WRITE)
 		put_page(page);
 
-	if (error && !hb->error)
-		hb->error = error;
+	if (bio->bi_error && !hb->error)
+		hb->error = bio->bi_error;
 	if (atomic_dec_and_test(&hb->count))
 		wake_up(&hb->wait);
 
diff --git a/kernel/power/tuxonice_bio_core.c b/kernel/power/tuxonice_bio_core.c
index 193e1532e..87aa4c96e 100644
--- a/kernel/power/tuxonice_bio_core.c
+++ b/kernel/power/tuxonice_bio_core.c
@@ -305,7 +305,6 @@ static int toi_finish_all_io(void)
 /**
  * toi_end_bio - bio completion function.
  * @bio: bio that has completed.
- * @err: Error value. Yes, like end_swap_bio_read, we ignore it.
  *
  * Function called by the block driver from interrupt context when I/O is
  * completed. If we were writing the page, we want to free it and will have
@@ -314,11 +313,11 @@ static int toi_finish_all_io(void)
  * reading the page, it will be in the singly linked list made from
  * page->private pointers.
 **/
-static void toi_end_bio(struct bio *bio, int err)
+static void toi_end_bio(struct bio *bio)
 {
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+	BUG_ON(bio->bi_error);
 
 	unlock_page(page);
 	bio_put(bio);
@@ -385,7 +384,7 @@ static int submit(int writing, struct block_device *dev, sector_t first_block,
 	bio->bi_iter.bi_sector = first_block;
 	bio->bi_private = (void *) ((unsigned long) free_group);
 	bio->bi_end_io = toi_end_bio;
-	bio->bi_flags |= (1 << BIO_TOI);
+	bio_set_flag(bio, BIO_TOI);
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		printk(KERN_DEBUG "ERROR: adding page to bio at %lld\n",
@@ -408,8 +407,8 @@ static int submit(int writing, struct block_device *dev, sector_t first_block,
 
 	/* Still read the header! */
 	if (unlikely(test_action_state(TOI_TEST_BIO) && writing)) {
 		/* Fake having done the hard work */
-		set_bit(BIO_UPTODATE, &bio->bi_flags);
-		toi_end_bio(bio, 0);
+		bio->bi_error = 0;
+		toi_end_bio(bio);
 	} else
 		submit_bio(writing | REQ_SYNC, bio);
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 019069c84..1896386e1 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -17,6 +17,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "power.h"
 
@@ -83,7 +84,9 @@ static inline void decrement_wakelocks_number(void) {}
 #define WL_GC_COUNT_MAX	100
 #define WL_GC_TIME_SEC	300
 
+static void __wakelocks_gc(struct work_struct *work);
 static LIST_HEAD(wakelocks_lru_list);
+static DECLARE_WORK(wakelock_work, __wakelocks_gc);
 static unsigned int wakelocks_gc_count;
 
 static inline void wakelocks_lru_add(struct wakelock *wl)
@@ -96,13 +99,12 @@ static inline void wakelocks_lru_most_recent(struct wakelock *wl)
 	list_move(&wl->lru, &wakelocks_lru_list);
 }
 
-static void wakelocks_gc(void)
+static void __wakelocks_gc(struct work_struct *work)
 {
 	struct wakelock *wl, *aux;
 	ktime_t now;
 
-	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
-		return;
+	mutex_lock(&wakelocks_lock);
 
 	now = ktime_get();
 	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
@@ -127,6 +129,16 @@ static void wakelocks_gc(void)
 		}
 	}
 	wakelocks_gc_count = 0;
+
+	mutex_unlock(&wakelocks_lock);
+}
+
+static void wakelocks_gc(void)
+{
+	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
+		return;
+
+	schedule_work(&wakelock_work);
 }
 #else /* !CONFIG_PM_WAKELOCKS_GC */
 static inline void wakelocks_lru_add(struct wakelock *wl) {}