author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-17 02:25:44 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-17 02:25:44 -0300
commit    5b465b045af3a649a89b8a5c5bfdece20ffc0345 (patch)
tree      934f851eaec863864e15fa7ef9514a445a9e88ff /block
parent    0520a938e11c34a5ffc422b9316b85e294b0fbb2 (diff)
Linux-libre 4.7.4-gnupck-4.7.4-gnu
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c   2
-rw-r--r--  block/bfq.h           2
-rw-r--r--  block/blk-core.c      4
-rw-r--r--  block/blk-merge.c    22
4 files changed, 27 insertions, 3 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c2cb29873..76a701abf 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4834,7 +4834,7 @@ static struct blkcg_policy blkcg_policy_bfq = {
static int __init bfq_init(void)
{
int ret;
- char msg[50] = "BFQ I/O-scheduler: v8r2";
+ char msg[50] = "BFQ I/O-scheduler: v8r3";
/*
* Can be 0 on HZ < 1000 setups.
diff --git a/block/bfq.h b/block/bfq.h
index c6ba0994f..49d28b9dc 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -1,5 +1,5 @@
/*
- * BFQ-v8r2 for 4.7.0: data structures and common functions prototypes.
+ * BFQ-v8r3 for 4.7.0: data structures and common functions prototypes.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-core.c b/block/blk-core.c
index 3545520c7..7bcf0e30a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -518,7 +518,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
if (q->mq_ops)
blk_mq_wake_waiters(q);
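
[Editor's note: the blk-core.c hunk above moves the QUEUE_FLAG_DYING update under q->queue_lock with interrupts disabled, so the non-atomic read-modify-write on the queue flags word cannot race with other locked flag updates. Below is a minimal userspace sketch of that locked flag-set pattern, not the kernel API itself: fake_queue, FLAG_DYING, and the pthread mutex standing in for spin_lock_irq() on q->queue_lock are all illustrative assumptions.]

/*
 * Userspace sketch of the change above: setting a flag bit under the
 * queue lock instead of with an unlocked read-modify-write. All names
 * here are hypothetical stand-ins for the kernel's request_queue bits.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_DYING (1u << 0)

struct fake_queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	unsigned int flags;	/* stands in for the queue flags word */
};

/* Racy: a concurrent read-modify-write on ->flags can lose this update. */
static void flag_set_unlocked(struct fake_queue *q, unsigned int f)
{
	q->flags |= f;
}

/* Serialized: mirrors the locked queue_flag_set() pattern in the hunk. */
static void flag_set_locked(struct fake_queue *q, unsigned int f)
{
	pthread_mutex_lock(&q->lock);
	q->flags |= f;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct fake_queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	flag_set_unlocked(&q, 1u << 1);	/* safe only while truly unshared */
	flag_set_locked(&q, FLAG_DYING);
	printf("dying=%u\n", q.flags & FLAG_DYING);
	return 0;
}
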
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 261353166..bea93441a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -94,9 +94,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ unsigned bvecs = 0;
bio_for_each_segment(bv, bio, iter) {
/*
+ * With arbitrary bio size, the incoming bio may be very
+ * big. We have to split the bio into small bios so that
+ * each holds at most BIO_MAX_PAGES bvecs because
+ * bio_clone() can fail to allocate big bvecs.
+ *
+ * It would be better to apply the limit per
+ * request queue in which bio_clone() is involved,
+ * instead of globally. The biggest blocker is the
+ * bio_clone() in bio bounce.
+ *
+ * If a bio is split for this reason, we should allow
+ * the resulting bios to keep merging, but we don't
+ * do that now, to keep the change simple.
+ *
+ * TODO: deal with bio bounce's bio_clone() gracefully
+ * and convert the global limit into per-queue limit.
+ */
+ if (bvecs++ >= BIO_MAX_PAGES)
+ goto split;
+
+ /*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
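
[Editor's note: the guard added above caps each split-off bio at BIO_MAX_PAGES segments (256 in this kernel) by counting bvecs as the bio is walked and jumping to the split label once the cap is reached. Below is a rough userspace sketch of that counting logic only; fake_bio, split_at, and the small MAX_VECS in place of BIO_MAX_PAGES are hypothetical, and the loop stands in for bio_for_each_segment().]

/*
 * Userspace sketch of the segment-count guard added above. Returns the
 * segment index at which the bio must be split, or nr_segs if the bio
 * fits within the cap and no split is needed.
 */
#include <stdio.h>

#define MAX_VECS 4	/* stands in for BIO_MAX_PAGES */

struct fake_bio {
	unsigned int nr_segs;	/* segments in the incoming bio */
};

static unsigned int split_at(const struct fake_bio *bio)
{
	unsigned int bvecs = 0, i;

	for (i = 0; i < bio->nr_segs; i++) {
		if (bvecs++ >= MAX_VECS)
			return i;	/* "goto split" in the kernel code */
	}
	return bio->nr_segs;
}

int main(void)
{
	struct fake_bio small = { .nr_segs = 3 };
	struct fake_bio big = { .nr_segs = 10 };

	printf("small splits at %u of %u\n", split_at(&small), small.nr_segs);
	printf("big splits at %u of %u\n", split_at(&big), big.nr_segs);
	return 0;
}
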