Diffstat (limited to 'block/bio.c')
-rw-r--r--  block/bio.c  74
1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 462386908..aa7354088 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -43,7 +43,7 @@
* unsigned short
*/
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
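
The slab array is indexed by pool; bvec_alloc() maps a requested vector count onto the smallest size class that fits. A rough, userspace-runnable sketch of that mapping, assuming the usual cut-offs of 1/4/16/64/128 and BIO_MAX_PAGES = 256 (illustrative values, not taken from this patch):

#include <stdio.h>

/* Illustrative sketch of the size-class lookup done at allocation
 * time; the cut-offs mirror the bvec_slabs sizes above. */
#define BIO_MAX_PAGES 256

static int nr_vecs_to_pool(int nr)
{
	if (nr <= 1)
		return 0;
	if (nr <= 4)
		return 1;
	if (nr <= 16)
		return 2;
	if (nr <= 64)
		return 3;
	if (nr <= 128)
		return 4;
	if (nr <= BIO_MAX_PAGES)
		return 5;	/* last pool: the mempool-backed one */
	return -1;		/* too large: allocation fails */
}

int main(void)
{
	int nr;

	for (nr = 1; nr <= BIO_MAX_PAGES; nr *= 2)
		printf("nr_vecs %3d -> pool %d\n", nr, nr_vecs_to_pool(nr));
	return 0;
}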
@@ -160,11 +160,15 @@ unsigned int bvec_nr_vecs(unsigned short idx)
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
- BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+ if (!idx)
+ return;
+ idx--;
+
+ BIO_BUG_ON(idx >= BVEC_POOL_NR);
- if (idx == BIOVEC_MAX_IDX)
+ if (idx == BVEC_POOL_MAX) {
mempool_free(bv, pool);
- else {
+ } else {
struct biovec_slab *bvs = bvec_slabs + idx;
kmem_cache_free(bvs->slab, bv);
@@ -206,7 +210,7 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
* idx now points to the pool we want to allocate from. only the
* 1-vec entry pool is mempool backed.
*/
- if (*idx == BIOVEC_MAX_IDX) {
+ if (*idx == BVEC_POOL_MAX) {
fallback:
bvl = mempool_alloc(pool, gfp_mask);
} else {
@@ -226,11 +230,12 @@ fallback:
*/
bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
- *idx = BIOVEC_MAX_IDX;
+ *idx = BVEC_POOL_MAX;
goto fallback;
}
}
+ (*idx)++;
return bvl;
}
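
The index bvec_alloc() now hands back is biased by one, so zero can stand for "no separately allocated bvec" and bvec_free() can be called unconditionally from bio_free(). A minimal userspace sketch of that encoding; the constants and the printed actions are stand-ins for the real pool bookkeeping:

#include <stdio.h>

#define BVEC_POOL_NR  6			/* illustrative pool count      */
#define BVEC_POOL_MAX (BVEC_POOL_NR - 1) /* last pool is mempool backed */

/* 0 means "inline vecs, nothing to free"; 1..BVEC_POOL_NR map to
 * pools 0..BVEC_POOL_NR-1, mirroring the biased index above. */
static void bvec_free_sketch(unsigned int idx)
{
	if (!idx) {
		printf("idx 0: inline vecs, nothing to free\n");
		return;
	}
	idx--;	/* undo the +1 bias applied at allocation time */

	if (idx == BVEC_POOL_MAX)
		printf("pool %u: freed via the mempool\n", idx);
	else
		printf("pool %u: freed via its kmem_cache\n", idx);
}

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx <= BVEC_POOL_NR; idx++)
		bvec_free_sketch(idx);
	return 0;
}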
@@ -250,8 +255,7 @@ static void bio_free(struct bio *bio)
__bio_free(bio);
if (bs) {
- if (bio_flagged(bio, BIO_OWNS_VEC))
- bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
+ bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
/*
* If we have front padding, adjust the bio pointer before freeing
@@ -420,7 +424,6 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
gfp_t saved_gfp = gfp_mask;
unsigned front_pad;
unsigned inline_vecs;
- unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
@@ -480,6 +483,8 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
bio_init(bio);
if (nr_iovecs > inline_vecs) {
+ unsigned long idx = 0;
+
bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
if (!bvl && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
@@ -490,13 +495,12 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
if (unlikely(!bvl))
goto err_free;
- bio_set_flag(bio, BIO_OWNS_VEC);
+ bio->bi_flags |= idx << BVEC_POOL_OFFSET;
} else if (nr_iovecs) {
bvl = bio->bi_inline_vecs;
}
bio->bi_pool = bs;
- bio->bi_flags |= idx << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
return bio;
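
With BIO_OWNS_VEC gone, the (biased) pool index is carried in the upper bits of bi_flags and read back later through BVEC_POOL_IDX(). A small stand-alone sketch of that bit packing; the offset and the flag word width here are placeholders, not the kernel's actual layout:

#include <assert.h>
#include <stdio.h>

/* Placeholder offset; the kernel derives the real one from the number
 * of BIO_* flag bits in use. */
#define BVEC_POOL_OFFSET 10
#define BVEC_POOL_IDX(flags) ((flags) >> BVEC_POOL_OFFSET)

int main(void)
{
	unsigned int bi_flags = 0;
	unsigned int idx = 4;	/* 1-based pool index from bvec_alloc() */

	/* bio_alloc_bioset(): stash the pool index above the flag bits */
	bi_flags |= idx << BVEC_POOL_OFFSET;

	/* bio_free()/bvec_free(): recover it; 0 would mean inline vecs */
	assert(BVEC_POOL_IDX(bi_flags) == idx);
	printf("stored pool idx %u, read back %u\n",
	       idx, BVEC_POOL_IDX(bi_flags));
	return 0;
}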
@@ -568,7 +572,7 @@ EXPORT_SYMBOL(bio_phys_segments);
*/
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
- BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
+ BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
/*
* most users will be overriding ->bi_bdev with a new target,
@@ -576,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
*/
bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
- bio->bi_rw = bio_src->bi_rw;
+ bio->bi_opf = bio_src->bi_opf;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
@@ -658,24 +662,24 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
-
bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_rw = bio_src->bi_rw;
+ bio->bi_opf = bio_src->bi_opf;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
- if (bio->bi_rw & REQ_DISCARD)
- goto integrity_clone;
-
- if (bio->bi_rw & REQ_WRITE_SAME) {
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ break;
+ case REQ_OP_WRITE_SAME:
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- goto integrity_clone;
+ break;
+ default:
+ bio_for_each_segment(bv, bio_src, iter)
+ bio->bi_io_vec[bio->bi_vcnt++] = bv;
+ break;
}
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
if (bio_integrity(bio_src)) {
int ret;
@@ -858,21 +862,20 @@ static void submit_bio_wait_endio(struct bio *bio)
/**
* submit_bio_wait - submit a bio, and wait until it completes
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bio: The &struct bio which describes the I/O
*
* Simple wrapper around submit_bio(). Returns 0 on success, or the error from
* bio_endio() on failure.
*/
-int submit_bio_wait(int rw, struct bio *bio)
+int submit_bio_wait(struct bio *bio)
{
struct submit_bio_ret ret;
- rw |= REQ_SYNC;
init_completion(&ret.event);
bio->bi_private = &ret;
bio->bi_end_io = submit_bio_wait_endio;
- submit_bio(rw, bio);
+ bio->bi_opf |= REQ_SYNC;
+ submit_bio(bio);
wait_for_completion_io(&ret.event);
return ret.error;
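
Callers now set the operation on the bio itself and pass only the bio; REQ_SYNC is ORed in by submit_bio_wait(). A hedged sketch of an updated caller against the ~4.8 API, with the bdev, page, and sector as placeholders:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Sketch of a caller updated for the new prototype: the op is set on
 * the bio via bio_set_op_attrs() instead of being passed alongside it. */
static int read_one_page_sync(struct block_device *bdev, struct page *page,
			      sector_t sector)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	ret = submit_bio_wait(bio);	/* was: submit_bio_wait(READ, bio) */
	bio_put(bio);
	return ret;
}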
@@ -1103,7 +1106,6 @@ int bio_uncopy_user(struct bio *bio)
bio_put(bio);
return ret;
}
-EXPORT_SYMBOL(bio_uncopy_user);
/**
* bio_copy_user_iov - copy user data to bio
@@ -1171,7 +1173,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
goto out_bmd;
if (iter->type & WRITE)
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = 0;
@@ -1341,7 +1343,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
* set data direction, and check if mapped pages need bouncing
*/
if (iter->type & WRITE)
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio_set_flag(bio, BIO_USER_MAPPED);
@@ -1398,7 +1400,6 @@ void bio_unmap_user(struct bio *bio)
__bio_unmap_user(bio);
bio_put(bio);
}
-EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio)
{
@@ -1534,7 +1535,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
bio->bi_private = data;
} else {
bio->bi_end_io = bio_copy_kern_endio;
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}
return bio;
@@ -1544,7 +1545,6 @@ cleanup:
bio_put(bio);
return ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL(bio_copy_kern);
/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1789,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
* Discards need a mutable bio_vec to accommodate the payload
* required by the DSM TRIM and UNMAP commands.
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
split = bio_clone_bioset(bio, gfp, bs);
else
split = bio_clone_fast(bio, gfp, bs);
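
Because secure erase is now its own op rather than a flag on discard, both ops have to be matched wherever bi_rw & REQ_DISCARD used to be tested. A hypothetical helper (not part of this patch or the tree) capturing the same check:

#include <linux/bio.h>

/* Hypothetical convenience wrapper around the check bio_split() makes. */
static inline bool bio_is_discard_like(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE;
}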
@@ -1838,7 +1838,7 @@ EXPORT_SYMBOL_GPL(bio_trim);
*/
mempool_t *biovec_create_pool(int pool_entries)
{
- struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
+ struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
return mempool_create_slab_pool(pool_entries, bp->slab);
}
@@ -2026,7 +2026,7 @@ static void __init biovec_init_slabs(void)
{
int i;
- for (i = 0; i < BIOVEC_NR_POOLS; i++) {
+ for (i = 0; i < BVEC_POOL_NR; i++) {
int size;
struct biovec_slab *bvs = bvec_slabs + i;