author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
commit     d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree       7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /block/blk-core.c
parent     e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)

Linux-libre 4.8.2-gnupck-4.8.2-gnu
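
Most of the hunks below track the 4.8-era conversion of the block layer from a combined
read/write-plus-flags word to a separate request op: bio->bi_rw becomes bio->bi_opf,
REQ_FLUSH becomes REQ_PREFLUSH, and submit_bio() loses its rw argument. As a rough
caller-side sketch of what that means for code built against this tree (illustrative
only; example_submit_write() is a made-up name, and bio_set_op_attrs() is the generic
4.8 helper, not something defined in blk-core.c):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Hypothetical helper showing the caller-side shape of the API change. */
    static void example_submit_write(struct bio *bio)
    {
            /*
             * Pre-4.8 style (for contrast): the direction and flags travelled
             * together in one rw word:
             *
             *         submit_bio(WRITE | REQ_SYNC, bio);
             */

            /*
             * 4.8 style, matching this diff: the op and flags are carried by
             * the bio itself (bio_op() / bio->bi_opf), and submit_bio() takes
             * only the bio.
             */
            bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
            submit_bio(bio);
    }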
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 220
1 file changed, 86 insertions, 134 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 7bcf0e30a..cdcb188e8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -49,8 +49,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
-int trap_non_toi_io;
-
/*
* For the allocated request tables
*/
@@ -966,10 +964,10 @@ static void __freed_request(struct request_list *rl, int sync)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
{
struct request_queue *q = rl->q;
- int sync = rw_is_sync(flags);
+ int sync = rw_is_sync(op, flags);
q->nr_rqs[sync]--;
rl->count[sync]--;
@@ -1036,7 +1034,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*/
- if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
return false;
return true;
@@ -1061,7 +1059,8 @@ static struct io_context *rq_ioc(struct bio *bio)
/**
* __get_request - get a free request
* @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1072,21 +1071,22 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *__get_request(struct request_list *rl, int rw_flags,
- struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+ int op_flags, struct bio *bio,
+ gfp_t gfp_mask)
{
struct request_queue *q = rl->q;
struct request *rq;
struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
- const bool is_sync = rw_is_sync(rw_flags) != 0;
+ const bool is_sync = rw_is_sync(op, op_flags) != 0;
int may_queue;
if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);
- may_queue = elv_may_queue(q, rw_flags);
+ may_queue = elv_may_queue(q, op, op_flags);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -1130,7 +1130,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
/*
* Decide whether the new request will be managed by elevator. If
- * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+ * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
* prevent the current elevator from being destroyed until the new
* request is freed. This guarantees icq's won't be destroyed and
* makes creating new ones safe.
@@ -1139,14 +1139,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
* it will be created after releasing queue_lock.
*/
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
- rw_flags |= REQ_ELVPRIV;
+ op_flags |= REQ_ELVPRIV;
q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q);
}
if (blk_queue_io_stat(q))
- rw_flags |= REQ_IO_STAT;
+ op_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
/* allocate and init request */
@@ -1156,10 +1156,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
- rq->cmd_flags = rw_flags | REQ_ALLOCED;
+ req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
/* init elvpriv */
- if (rw_flags & REQ_ELVPRIV) {
+ if (op_flags & REQ_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
if (ioc)
icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1185,7 +1185,7 @@ out:
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
- trace_block_getrq(q, bio, rw_flags & 1);
+ trace_block_getrq(q, bio, op);
return rq;
fail_elvpriv:
@@ -1215,7 +1215,7 @@ fail_alloc:
* queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
- freed_request(rl, rw_flags);
+ freed_request(rl, op, op_flags);
/*
* in the very unlikely event that allocation failed and no
@@ -1233,7 +1233,8 @@ rq_starved:
/**
* get_request - get a free request
* @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1244,17 +1245,18 @@ rq_starved:
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int rw_flags,
- struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+ int op_flags, struct bio *bio,
+ gfp_t gfp_mask)
{
- const bool is_sync = rw_is_sync(rw_flags) != 0;
+ const bool is_sync = rw_is_sync(op, op_flags) != 0;
DEFINE_WAIT(wait);
struct request_list *rl;
struct request *rq;
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
- rq = __get_request(rl, rw_flags, bio, gfp_mask);
+ rq = __get_request(rl, op, op_flags, bio, gfp_mask);
if (!IS_ERR(rq))
return rq;
@@ -1267,7 +1269,7 @@ retry:
prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
TASK_UNINTERRUPTIBLE);
- trace_block_sleeprq(q, bio, rw_flags & 1);
+ trace_block_sleeprq(q, bio, op);
spin_unlock_irq(q->queue_lock);
io_schedule();
@@ -1296,11 +1298,16 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
- rq = get_request(q, rw, NULL, gfp_mask);
- if (IS_ERR(rq))
+ rq = get_request(q, rw, 0, NULL, gfp_mask);
+ if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
- /* q->queue_lock is unlocked at this point */
+ return rq;
+ }
+ /* q->queue_lock is unlocked at this point */
+ rq->__data_len = 0;
+ rq->__sector = (sector_t) -1;
+ rq->bio = rq->biotail = NULL;
return rq;
}
@@ -1316,63 +1323,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
EXPORT_SYMBOL(blk_get_request);
/**
- * blk_make_request - given a bio, allocate a corresponding struct request.
- * @q: target request queue
- * @bio: The bio describing the memory mappings that will be submitted for IO.
- * It may be a chained-bio properly constructed by block/bio layer.
- * @gfp_mask: gfp flags to be used for memory allocation
- *
- * blk_make_request is the parallel of generic_make_request for BLOCK_PC
- * type commands. Where the struct request needs to be farther initialized by
- * the caller. It is passed a &struct bio, which describes the memory info of
- * the I/O transfer.
- *
- * The caller of blk_make_request must make sure that bi_io_vec
- * are set to describe the memory buffers. That bio_data_dir() will return
- * the needed direction of the request. (And all bio's in the passed bio-chain
- * are properly set accordingly)
- *
- * If called under none-sleepable conditions, mapped bio buffers must not
- * need bouncing, by calling the appropriate masked or flagged allocator,
- * suitable for the target device. Otherwise the call to blk_queue_bounce will
- * BUG.
- *
- * WARNING: When allocating/cloning a bio-chain, careful consideration should be
- * given to how you allocate bios. In particular, you cannot use
- * __GFP_DIRECT_RECLAIM for anything but the first bio in the chain. Otherwise
- * you risk waiting for IO completion of a bio that hasn't been submitted yet,
- * thus resulting in a deadlock. Alternatively bios should be allocated using
- * bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock.
- * If possible a big IO should be split into smaller parts when allocation
- * fails. Partial allocation should not be an error, or you risk a live-lock.
- */
-struct request *blk_make_request(struct request_queue *q, struct bio *bio,
- gfp_t gfp_mask)
-{
- struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
-
- if (IS_ERR(rq))
- return rq;
-
- blk_rq_set_block_pc(rq);
-
- for_each_bio(bio) {
- struct bio *bounce_bio = bio;
- int ret;
-
- blk_queue_bounce(q, &bounce_bio);
- ret = blk_rq_append_bio(q, rq, bounce_bio);
- if (unlikely(ret)) {
- blk_put_request(rq);
- return ERR_PTR(ret);
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(blk_make_request);
-
-/**
* blk_rq_set_block_pc - initialize a request to type BLOCK_PC
* @rq: request to be initialized
*
@@ -1380,9 +1330,6 @@ EXPORT_SYMBOL(blk_make_request);
void blk_rq_set_block_pc(struct request *rq)
{
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->__data_len = 0;
- rq->__sector = (sector_t) -1;
- rq->bio = rq->biotail = NULL;
memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
EXPORT_SYMBOL(blk_rq_set_block_pc);
@@ -1501,13 +1448,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/
if (req->cmd_flags & REQ_ALLOCED) {
unsigned int flags = req->cmd_flags;
+ int op = req_op(req);
struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req);
- freed_request(rl, flags);
+ freed_request(rl, op, flags);
blk_put_rl(rl);
}
}
@@ -1564,7 +1512,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
- const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_back_merge_fn(q, req, bio))
return false;
@@ -1586,7 +1534,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
- const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_front_merge_fn(q, req, bio))
return false;
@@ -1708,8 +1656,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
- req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
- if (bio->bi_rw & REQ_RAHEAD)
+ req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+ if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
@@ -1720,12 +1668,12 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
- const bool sync = !!(bio->bi_rw & REQ_SYNC);
+ const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
- int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+ int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
- bool wb_acct;
+ unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1742,7 +1690,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
@@ -1778,32 +1726,34 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
- wb_acct = wbt_wait(q->rq_wb, bio->bi_rw, q->queue_lock);
+ wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, q->queue_lock);
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
* rq allocator and io schedulers.
*/
- rw_flags = bio_data_dir(bio);
if (sync)
rw_flags |= REQ_SYNC;
/*
+ * Add in META/PRIO flags, if set, before we get to the IO scheduler
+ */
+ rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
+
+ /*
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
- req = get_request(q, rw_flags, bio, GFP_NOIO);
+ req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
- if (wb_acct)
- __wbt_done(q->rq_wb);
+ __wbt_done(q->rq_wb, wb_acct);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
- if (wb_acct)
- wbt_mark_tracked(&req->wb_stat);
+ wbt_track(&req->wb_stat, wb_acct);
/*
* After dropping the lock and possibly sleeping here, our request
@@ -1867,9 +1817,9 @@ static void handle_bad_sector(struct bio *bio)
char b[BDEVNAME_SIZE];
printk(KERN_INFO "attempt to access beyond end of device\n");
- printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+ printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
- bio->bi_rw,
+ bio->bi_opf,
(unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
}
@@ -1982,25 +1932,30 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
- if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
- bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+ bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
goto end_io;
}
}
- if ((bio->bi_rw & REQ_DISCARD) &&
- (!blk_queue_discard(q) ||
- ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
-
- if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
- err = -EOPNOTSUPP;
- goto end_io;
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ if (!blk_queue_discard(q))
+ goto not_supported;
+ break;
+ case REQ_OP_SECURE_ERASE:
+ if (!blk_queue_secure_erase(q))
+ goto not_supported;
+ break;
+ case REQ_OP_WRITE_SAME:
+ if (!bdev_write_same(bio->bi_bdev))
+ goto not_supported;
+ break;
+ default:
+ break;
}
/*
@@ -2017,6 +1972,8 @@ generic_make_request_checks(struct bio *bio)
trace_block_bio_queue(q, bio);
return true;
+not_supported:
+ err = -EOPNOTSUPP;
end_io:
bio->bi_error = err;
bio_endio(bio);
@@ -2112,7 +2069,6 @@ EXPORT_SYMBOL(generic_make_request);
/**
* submit_bio - submit a bio to the block device layer for I/O
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bio: The &struct bio which describes the I/O
*
* submit_bio() is very similar in purpose to generic_make_request(), and
@@ -2120,13 +2076,8 @@ EXPORT_SYMBOL(generic_make_request);
* interfaces; @bio must be presetup and ready for I/O.
*
*/
-blk_qc_t submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(struct bio *bio)
{
- bio->bi_rw |= rw;
-
- if (unlikely(trap_non_toi_io))
- BUG_ON(!bio_flagged(bio, BIO_TOI));
-
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
@@ -2134,12 +2085,12 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
if (bio_has_data(bio)) {
unsigned int count;
- if (unlikely(rw & REQ_WRITE_SAME))
+ if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
count = bdev_logical_block_size(bio->bi_bdev) >> 9;
else
count = bio_sectors(bio);
- if (rw & WRITE) {
+ if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
} else {
task_io_account_read(bio->bi_iter.bi_size);
@@ -2150,7 +2101,7 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
- (rw & WRITE) ? "WRITE" : "READ",
+ op_is_write(bio_op(bio)) ? "WRITE" : "READ",
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
@@ -2181,7 +2132,7 @@ EXPORT_SYMBOL(submit_bio);
static int blk_cloned_rq_check_limits(struct request_queue *q,
struct request *rq)
{
- if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+ if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
@@ -2237,7 +2188,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
*/
BUG_ON(blk_queued_rq(rq));
- if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+ if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
where = ELEVATOR_INSERT_FLUSH;
add_acct_request(q, rq, where);
@@ -2282,7 +2233,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
* one.
*/
for (bio = rq->bio; bio; bio = bio->bi_next) {
- if ((bio->bi_rw & ff) != ff)
+ if ((bio->bi_opf & ff) != ff)
break;
bytes += bio->bi_iter.bi_size;
}
@@ -2697,7 +2648,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
/* mixed attributes always follow the first bio */
if (req->cmd_flags & REQ_MIXED_MERGE) {
req->cmd_flags &= ~REQ_FAILFAST_MASK;
- req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+ req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
/*
@@ -3005,8 +2956,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
- rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
+ req_set_op(rq, bio_op(bio));
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -3091,7 +3041,8 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+ req_set_op_attrs(dst, req_op(src),
+ (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -3336,7 +3287,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* rq is already accounted, so use raw insert
*/
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+ if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
@@ -3403,6 +3354,7 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie)
return false;
}
+EXPORT_SYMBOL_GPL(blk_poll);
#ifdef CONFIG_PM
/**