Diffstat (limited to 'block')
-rw-r--r--   block/bfq-iosched.c |  2 +-
-rw-r--r--   block/bfq.h         |  2 +-
-rw-r--r--   block/blk-core.c    |  4 +++-
-rw-r--r--   block/blk-merge.c   | 22 ++++++++++++++++++++++
4 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c2cb29873..76a701abf 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4834,7 +4834,7 @@ static struct blkcg_policy blkcg_policy_bfq = {
 static int __init bfq_init(void)
 {
 	int ret;
-	char msg[50] = "BFQ I/O-scheduler: v8r2";
+	char msg[50] = "BFQ I/O-scheduler: v8r3";
 
 	/*
 	 * Can be 0 on HZ < 1000 setups.
diff --git a/block/bfq.h b/block/bfq.h
index c6ba0994f..49d28b9dc 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -1,5 +1,5 @@
 /*
- * BFQ-v8r2 for 4.7.0: data structures and common functions prototypes.
+ * BFQ-v8r3 for 4.7.0: data structures and common functions prototypes.
  *
  * Based on ideas and code from CFQ:
  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-core.c b/block/blk-core.c
index 3545520c7..7bcf0e30a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -518,7 +518,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
+	spin_unlock_irq(q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_wake_waiters(q);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 261353166..bea93441a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -94,9 +94,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
+	unsigned bvecs = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
+		 * With arbitrary bio size, the incoming bio may be very
+		 * big. We have to split the bio into small bios so that
+		 * each holds at most BIO_MAX_PAGES bvecs because
+		 * bio_clone() can fail to allocate big bvecs.
+		 *
+		 * It should have been better to apply the limit per
+		 * request queue in which bio_clone() is involved,
+		 * instead of globally. The biggest blocker is the
+		 * bio_clone() in bio bounce.
+		 *
+		 * If bio is splitted by this reason, we should have
+		 * allowed to continue bios merging, but don't do
+		 * that now for making the change simple.
+		 *
+		 * TODO: deal with bio bounce's bio_clone() gracefully
+		 * and convert the global limit into per-queue limit.
+		 */
+		if (bvecs++ >= BIO_MAX_PAGES)
+			goto split;
+
+		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
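The blk-merge.c hunk caps each split bio at BIO_MAX_PAGES bvecs so bio_clone() never has to allocate an oversized bvec table. As a rough illustration of the resulting split count, here is a minimal userspace sketch (not kernel code); BIO_MAX_PAGES is 256 in kernels of this vintage, and the 4 MiB bio with single-page bvecs is an assumed example value, not something taken from the patch:

#include <stdio.h>

/* Userspace illustration only, not kernel code. */
#define BIO_MAX_PAGES 256u
#define PAGE_SIZE     4096u   /* assumed 4 KiB pages */

int main(void)
{
	unsigned int bio_bytes = 4u << 20;            /* example: one 4 MiB bio */
	unsigned int bvecs = bio_bytes / PAGE_SIZE;   /* one single-page bvec per page */
	unsigned int splits = (bvecs + BIO_MAX_PAGES - 1) / BIO_MAX_PAGES;

	printf("%u bvecs split into %u bios of at most %u bvecs (%u KiB) each\n",
	       bvecs, splits, BIO_MAX_PAGES, BIO_MAX_PAGES * PAGE_SIZE / 1024);
	return 0;
}

Running it prints "1024 bvecs split into 4 bios of at most 256 bvecs (1024 KiB) each", which is the bound the new "if (bvecs++ >= BIO_MAX_PAGES)" check enforces on each child bio.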