From 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Tue, 15 Dec 2015 14:52:16 -0300
Subject: Linux-libre 4.3.2-gnu

---
 drivers/md/dm-thin.c | 161 ++++++++++++---------------------------------------
 1 file changed, 38 insertions(+), 123 deletions(-)

(limited to 'drivers/md/dm-thin.c')

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 493c38e08..3897b90bd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -332,9 +332,6 @@ struct thin_c {
  *
  * Description:
  *    Asynchronously issue a discard request for the sectors in question.
- * NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
- * that is being kept local to DM thinp until the block changes to allow
- * late bio splitting land upstream.
  */
 static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
 					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
@@ -342,91 +339,36 @@ static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sect
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio *bio;
-	int ret = 0;
-	struct blk_plug plug;
 
-	if (!q)
+	if (!q || !nr_sects)
 		return -ENXIO;
 
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
 		type |= REQ_SECURE;
 	}
 
-	blk_start_plug(&plug);
-	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
-
-		/*
-		 * Required bio_put occurs in bio_endio thanks to bio_chain below
-		 */
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
-		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
-
-		bio_chain(bio, parent_bio);
-
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
+	/*
+	 * Required bio_put occurs in bio_endio thanks to bio_chain below
+	 */
+	bio = bio_alloc(gfp_mask, 1);
+	if (!bio)
+		return -ENOMEM;
 
-		bio->bi_iter.bi_size = req_sects << 9;
-		nr_sects -= req_sects;
-		sector = end_sect;
+	bio_chain(bio, parent_bio);
 
-		submit_bio(type, bio);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_size = nr_sects << 9;
 
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
-	}
-	blk_finish_plug(&plug);
+	submit_bio(type, bio);
 
-	return ret;
+	return 0;
 }
 
 static bool block_size_is_power_of_two(struct pool *pool)
@@ -615,8 +557,10 @@ static void error_bio_list(struct bio_list *bios, int error)
 {
 	struct bio *bio;
 
-	while ((bio = bio_list_pop(bios)))
-		bio_endio(bio, error);
+	while ((bio = bio_list_pop(bios))) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	}
 }
 
 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
@@ -870,14 +814,14 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	complete_mapping_preparation(m);
 }
 
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 
 	bio->bi_end_io = m->saved_bi_end_io;
-	m->err = err;
+	m->err = bio->bi_error;
 
 	complete_mapping_preparation(m);
 }
@@ -1002,7 +946,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1032,7 +976,7 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
 
 static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
 {
-	bio_endio(m->bio, 0);
+	bio_endio(m->bio);
 	free_discard_mapping(m);
 }
 
@@ -1046,7 +990,7 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
 		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
 		bio_io_error(m->bio);
 	} else
-		bio_endio(m->bio, 0);
+		bio_endio(m->bio);
 
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, tc->pool->mapping_pool);
@@ -1117,7 +1061,8 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 	 * Even if r is set, there could be sub discards in flight that we
 	 * need to wait for.
 	 */
-	bio_endio(m->bio, r);
+	m->bio->bi_error = r;
+	bio_endio(m->bio);
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }
@@ -1493,9 +1438,10 @@ static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
 	int error = should_error_unserviceable_bio(pool);
 
-	if (error)
-		bio_endio(bio, error);
-	else
+	if (error) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	} else
 		retry_on_resume(bio);
 }
 
@@ -1539,9 +1485,8 @@ static void process_discard_cell_no_passdown(struct thin_c *tc,
 }
 
 /*
- * FIXME: DM local hack to defer parent bios's end_io until we
- * _know_ all chained sub range discard bios have completed.
- * Will go away once late bio splitting lands upstream!
+ * __bio_inc_remaining() is used to defer parent bios's end_io until
+ * we _know_ all chained sub range discard bios have completed.
  */
 static inline void __bio_inc_remaining(struct bio *bio)
 {
@@ -1631,7 +1576,7 @@ static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_priso
 	 * will prevent completion until the sub range discards have
 	 * completed.
 	 */
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
@@ -1645,7 +1590,7 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio)
 		/*
 		 * The discard covers less than a block.
 		 */
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1790,7 +1735,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
 		cell_defer_no_holder(tc, cell);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1855,7 +1800,7 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 
 			} else {
 				zero_fill_bio(bio);
-				bio_endio(bio, 0);
+				bio_endio(bio);
 			}
 		} else
 			provision_block(tc, bio, block, cell);
@@ -1926,7 +1871,7 @@ static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
 		}
 
 		zero_fill_bio(bio);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		break;
 
 	default:
@@ -1951,7 +1896,7 @@ static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell
 
 static void process_bio_success(struct thin_c *tc, struct bio *bio)
 {
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
@@ -2600,7 +2545,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	thin_hook_bio(tc, bio);
 
 	if (tc->requeue_mode) {
-		bio_endio(bio, DM_ENDIO_REQUEUE);
+		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio_endio(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 
@@ -3875,20 +3821,6 @@ static int pool_iterate_devices(struct dm_target *ti,
 	return fn(ti, pt->data_dev, 0, ti->len, data);
 }
 
-static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct pool_c *pt = ti->private;
-	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = pt->data_dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct pool_c *pt = ti->private;
@@ -3965,7 +3897,6 @@ static struct target_type pool_target = {
 	.resume = pool_resume,
 	.message = pool_message,
 	.status = pool_status,
-	.merge = pool_merge,
 	.iterate_devices = pool_iterate_devices,
 	.io_hints = pool_io_hints,
 };
@@ -4292,21 +4223,6 @@ err:
 	DMEMIT("Error");
 }
 
-static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct thin_c *tc = ti->private;
-	struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = tc->pool_dev->bdev;
-	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int thin_iterate_devices(struct dm_target *ti,
 				iterate_devices_callout_fn fn, void *data)
 {
 	struct thin_c *tc = ti->private;
@@ -4354,7 +4270,6 @@ static struct target_type thin_target = {
 	.presuspend = thin_presuspend,
 	.postsuspend = thin_postsuspend,
 	.status = thin_status,
-	.merge = thin_merge,
 	.iterate_devices = thin_iterate_devices,
 	.io_hints = thin_io_hints,
 };
--
cgit v1.2.3-54-g00ecf
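Note (not part of the patch): the dm-thin.c hunks above track two block-layer changes that landed in Linux 4.3. Late bio splitting makes the local discard-splitting loop and the .merge/merge_bvec_fn hooks unnecessary, and the reworked completion interface drops the error argument from bio_endio() and bi_end_io callbacks, carrying the status in bio->bi_error instead. Below is a minimal sketch of that completion pattern, assuming 4.3-era kernel headers; fail_bio() and example_end_io() are illustrative names, not functions from dm-thin.c.

#include <linux/bio.h>
#include <linux/printk.h>

/* 4.2 and earlier: bio_endio(bio, error); bi_end_io callbacks took (bio, err). */

/* 4.3: store the status in bio->bi_error, then complete the bio. */
static void fail_bio(struct bio *bio, int error)
{
	bio->bi_error = error;	/* 0 on success, negative errno on failure */
	bio_endio(bio);		/* runs bio->bi_end_io(bio), as in error_bio_list() above */
}

/* 4.3 bi_end_io callbacks receive only the bio and read bio->bi_error. */
static void example_end_io(struct bio *bio)
{
	if (bio->bi_error)
		pr_debug("bio failed: %d\n", bio->bi_error);
}

The same pattern repeats throughout the hunks: each bio_endio(bio, err) call either drops the now-implicit 0 or becomes an explicit bi_error assignment followed by bio_endio(bio), and end_io handlers such as overwrite_endio() lose their err parameter and read bio->bi_error.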