author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-11-11 16:22:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-11-11 16:22:27 -0300
commit     29f5b648fa0b31ad614c78468b9279e5fa96397a (patch)
tree       385e76324c4e95e90b16889937e7c24e6f7d4f4a /block
parent     1eae9639aac0f8de4d284f567ec722a822b52513 (diff)
Linux-libre 4.8.7-gnupck-4.8.7-gnu
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig          |   1
-rw-r--r--  block/Kconfig.iosched  |  11
-rw-r--r--  block/Makefile         |   2
-rw-r--r--  block/bfq-cgroup.c     |  12
-rw-r--r--  block/bfq-iosched.c    |  24
-rw-r--r--  block/bfq.h            |   2
-rw-r--r--  block/blk-core.c       |  21
-rw-r--r--  block/blk-mq-sysfs.c   |  47
-rw-r--r--  block/blk-mq.c         |  40
-rw-r--r--  block/blk-mq.h         |   3
-rw-r--r--  block/blk-settings.c   |  15
-rw-r--r--  block/blk-stat.c       | 221
-rw-r--r--  block/blk-stat.h       |  18
-rw-r--r--  block/blk-sysfs.c      | 151
-rw-r--r--  block/cfq-iosched.c    |  13
15 files changed, 24 insertions, 557 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 6da79e670..161491d0a 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,7 +4,6 @@ menuconfig BLOCK
 	bool "Enable the block layer" if EXPERT
 	default y
-	select WBT
 	help
 	  Provide block layer support for the kernel.
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 6d9257924..277b112b8 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -43,13 +43,10 @@ config IOSCHED_BFQ
 	tristate "BFQ I/O scheduler"
 	default n
 	---help---
-	  The BFQ I/O scheduler tries to distribute bandwidth among
-	  all processes according to their weights.
-	  It aims at distributing the bandwidth as desired, independently of
-	  the disk parameters and with any workload. It also tries to
-	  guarantee low latency to interactive and soft real-time
-	  applications. If compiled built-in (saying Y here), BFQ can
-	  be configured to support hierarchical scheduling.
+	  The BFQ I/O scheduler distributes bandwidth among all
+	  processes according to their weights, regardless of the
+	  device parameters and with any workload. It also guarantees
+	  a low latency to interactive and soft real-time applications.

 config BFQ_GROUP_IOSCHED
 	bool "BFQ hierarchical scheduling support"
diff --git a/block/Makefile b/block/Makefile
index b7aa613b4..4a3668393 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
+			blk-lib.o blk-mq.o blk-mq-tag.o \
 			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
 			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
 			badblocks.o partitions/
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 569988bda..b50ae8ec6 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -649,18 +649,6 @@ static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
 		goto out;

-	/*
-	 * If we have a non-root cgroup, we can depend on that to
-	 * do proper throttling of writes. Turn off wbt for that
-	 * case.
-	 */
-	if (bio_blkcg(bio) != &blkcg_root) {
-		struct request_queue *q = bfqd->queue;
-
-		if (q->rq_wb)
-			wbt_disable(q->rq_wb);
-	}
-
 	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
 	bic->blkcg_serial_nr = serial_nr;
 out:
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index ecb949ec6..0f3081dbd 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1,5 +1,5 @@
 /*
- * Budget Fair Queueing (BFQ) disk scheduler.
+ * Budget Fair Queueing (BFQ) I/O scheduler.
  *
  * Based on ideas and code from CFQ:
  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
@@ -77,19 +77,19 @@
 static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

 /* Maximum backwards seek, in KiB. */
-static const int bfq_back_max = 16 * 1024;
+static const int bfq_back_max = (16 * 1024);

 /* Penalty of a backwards seek, in number of sectors. */
 static const int bfq_back_penalty = 2;

 /* Idling period duration, in ns. */
-static u32 bfq_slice_idle = NSEC_PER_SEC / 125;
+static u32 bfq_slice_idle = (NSEC_PER_SEC / 125);

 /* Minimum number of assigned budgets for which stats are safe to compute. */
 static const int bfq_stats_min_budgets = 194;

 /* Default maximum budget values, in sectors and number of requests. */
-static const int bfq_default_max_budget = 16 * 1024;
+static const int bfq_default_max_budget = (16 * 1024);

 /*
  * Async to sync throughput distribution is controlled as follows:
@@ -99,7 +99,7 @@ static const int bfq_default_max_budget = 16 * 1024;
 static const int bfq_async_charge_factor = 10;

 /* Default timeout values, in jiffies, approximating CFQ defaults. */
-static const int bfq_timeout = HZ / 8;
+static const int bfq_timeout = (HZ / 8);

 struct kmem_cache *bfq_pool;

@@ -117,7 +117,7 @@ struct kmem_cache *bfq_pool;
 /* Min number of samples required to perform peak-rate update */
 #define BFQ_RATE_MIN_SAMPLES	32
 /* Min observation time interval required to perform a peak-rate update (ns) */
-#define BFQ_RATE_MIN_INTERVAL	300*NSEC_PER_MSEC
+#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
 /* Target observation time interval for a peak-rate update (ns) */
 #define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

@@ -2179,9 +2179,13 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
 	 * not only expires, but also remains with no
 	 * request.
 	 */
-	bfqq->last_wr_start_finish += jiffies -
-		max_t(unsigned long, bfqq->last_wr_start_finish,
-		      bfqq->budget_timeout);
+	if (time_after(bfqq->budget_timeout,
+		       bfqq->last_wr_start_finish))
+		bfqq->last_wr_start_finish +=
+			jiffies - bfqq->budget_timeout;
+	else
+		bfqq->last_wr_start_finish = jiffies;
+
 	if (time_is_after_jiffies(bfqq->last_wr_start_finish)) {
 		pr_crit(
 		"BFQ WARNING:last %lu budget %lu jiffies %lu",
@@ -5208,7 +5212,7 @@ static struct blkcg_policy blkcg_policy_bfq = {
 static int __init bfq_init(void)
 {
 	int ret;
-	char msg[50] = "BFQ I/O-scheduler: v8r4";
+	char msg[50] = "BFQ I/O-scheduler: v8r5";

 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 	ret = blkcg_policy_register(&blkcg_policy_bfq);
diff --git a/block/bfq.h b/block/bfq.h
index ea1e7d852..b80abe0e3 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -1,5 +1,5 @@
 /*
- * BFQ-v8r4 for 4.8.0: data structures and common functions prototypes.
+ * BFQ-v8r5 for 4.8.0: data structures and common functions prototypes.
  *
  * Based on ideas and code from CFQ:
  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-core.c b/block/blk-core.c
index cdcb188e8..36c7ac328 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,7 +33,6 @@
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
 #include <linux/blk-cgroup.h>
-#include <linux/wbt.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -883,8 +882,6 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,

 fail:
 	blk_free_flush_queue(q->fq);
-	wbt_exit(q->rq_wb);
-	q->rq_wb = NULL;
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1349,7 +1346,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
 	trace_block_rq_requeue(q, rq);
-	wbt_requeue(q->rq_wb, &rq->wb_stat);

 	if (rq->cmd_flags & REQ_QUEUED)
 		blk_queue_end_tag(q, rq);
@@ -1440,8 +1436,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	/* this is a bio leak */
 	WARN_ON(req->bio != NULL);

-	wbt_done(q->rq_wb, &req->wb_stat);
-
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
@@ -1673,7 +1667,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
 	unsigned int request_count = 0;
-	unsigned int wb_acct;

 	/*
 	 * low level driver can indicate that it wants pages above a
@@ -1726,8 +1719,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	}

 get_rq:
-	wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, q->queue_lock);
-
 	/*
 	 * This sync check and mask will be re-done in init_request_from_bio(),
 	 * but we need to set it earlier to expose the sync flag to the
@@ -1747,14 +1738,11 @@ get_rq:
 	 */
 	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
-		__wbt_done(q->rq_wb, wb_acct);
 		bio->bi_error = PTR_ERR(req);
 		bio_endio(bio);
 		goto out_unlock;
 	}

-	wbt_track(&req->wb_stat, wb_acct);
-
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
 	 * may now be mergeable after it had proven unmergeable (above).
@@ -2487,8 +2475,6 @@ void blk_start_request(struct request *req)
 {
 	blk_dequeue_request(req);

-	wbt_issue(req->q->rq_wb, &req->wb_stat);
-
 	/*
 	 * We are now handing the request to the hardware, initialize
 	 * resid_len to full count and add the timeout handler.
@@ -2556,8 +2542,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)

 	trace_block_rq_complete(req->q, req, nr_bytes);

-	blk_stat_add(&req->q->rq_stats[rq_data_dir(req)], req);
-
 	if (!req->bio)
 		return false;

@@ -2725,10 +2709,9 @@ void blk_finish_request(struct request *req, int error)

 	blk_account_io_done(req);

-	if (req->end_io) {
-		wbt_done(req->q->rq_wb, &req->wb_stat);
+	if (req->end_io)
 		req->end_io(req, error);
-	} else {
+	else {
 		if (blk_bidi_rq(req))
 			__blk_put_request(req->next_rq->q, req->next_rq);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b66bbf13c..fe822aa5b 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -247,47 +247,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 	return ret;
 }

-static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
-{
-	struct blk_mq_ctx *ctx;
-	unsigned int i;
-
-	hctx_for_each_ctx(hctx, ctx, i) {
-		blk_stat_init(&ctx->stat[0]);
-		blk_stat_init(&ctx->stat[1]);
-	}
-}
-
-static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
-					  const char *page, size_t count)
-{
-	blk_mq_stat_clear(hctx);
-	return count;
-}
-
-static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
-{
-	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
-			pre, (long long) stat->nr_samples,
-			(long long) stat->mean, (long long) stat->min,
-			(long long) stat->max);
-}
-
-static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
-{
-	struct blk_rq_stat stat[2];
-	ssize_t ret;
-
-	blk_stat_init(&stat[0]);
-	blk_stat_init(&stat[1]);
-
-	blk_hctx_stat_get(hctx, stat);
-
-	ret = print_stat(page, &stat[0], "read :");
-	ret += print_stat(page + ret, &stat[1], "write:");
-	return ret;
-}
-
 static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
 	.attr = {.name = "dispatched", .mode = S_IRUGO },
 	.show = blk_mq_sysfs_dispatched_show,
@@ -345,11 +304,6 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
 	.attr = {.name = "io_poll", .mode = S_IRUGO },
 	.show = blk_mq_hw_sysfs_poll_show,
 };
-static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
-	.attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
-	.show = blk_mq_hw_sysfs_stat_show,
-	.store = blk_mq_hw_sysfs_stat_store,
-};

 static struct attribute *default_hw_ctx_attrs[] = {
 	&blk_mq_hw_sysfs_queued.attr,
@@ -360,7 +314,6 @@ static struct attribute *default_hw_ctx_attrs[] = {
 	&blk_mq_hw_sysfs_cpus.attr,
 	&blk_mq_hw_sysfs_active.attr,
 	&blk_mq_hw_sysfs_poll.attr,
-	&blk_mq_hw_sysfs_stat.attr,
 	NULL,
 };
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 815e2ac02..c207fa987 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,7 +22,6 @@
 #include <linux/sched/sysctl.h>
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
-#include <linux/wbt.h>

 #include <trace/events/block.h>

@@ -30,7 +29,6 @@
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
-#include "blk-stat.h"

 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
@@ -332,8 +330,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,

 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
-
-	wbt_done(q->rq_wb, &rq->wb_stat);
 	rq->cmd_flags = 0;

 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -366,7 +362,6 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 	blk_account_io_done(rq);

 	if (rq->end_io) {
-		wbt_done(rq->q->rq_wb, &rq->wb_stat);
 		rq->end_io(rq, error);
 	} else {
 		if (unlikely(blk_bidi_rq(rq)))
@@ -417,19 +412,10 @@ static void blk_mq_ipi_complete_request(struct request *rq)
 	put_cpu();
 }

-static void blk_mq_stat_add(struct request *rq)
-{
-	struct blk_rq_stat *stat = &rq->mq_ctx->stat[rq_data_dir(rq)];
-
-	blk_stat_add(stat, rq);
-}
-
 static void __blk_mq_complete_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;

-	blk_mq_stat_add(rq);
-
 	if (!q->softirq_done_fn)
 		blk_mq_end_request(rq, rq->errors);
 	else
@@ -473,8 +459,6 @@ void blk_mq_start_request(struct request *rq)
 	if (unlikely(blk_bidi_rq(rq)))
 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

-	wbt_issue(q->rq_wb, &rq->wb_stat);
-
 	blk_add_timer(rq);

 	/*
@@ -510,7 +494,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;

 	trace_block_rq_requeue(q, rq);
-	wbt_requeue(q->rq_wb, &rq->wb_stat);

 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 		if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1329,7 +1312,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
 	blk_qc_t cookie;
-	unsigned int wb_acct;

 	blk_queue_bounce(q, &bio);

@@ -1344,15 +1326,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return BLK_QC_T_NONE;

-	wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
-
 	rq = blk_mq_map_request(q, bio, &data);
-	if (unlikely(!rq)) {
-		__wbt_done(q->rq_wb, wb_acct);
+	if (unlikely(!rq))
 		return BLK_QC_T_NONE;
-	}
-
-	wbt_track(&rq->wb_stat, wb_acct);

 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

@@ -1429,7 +1405,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	struct blk_map_ctx data;
 	struct request *rq;
 	blk_qc_t cookie;
-	unsigned int wb_acct;

 	blk_queue_bounce(q, &bio);

@@ -1446,15 +1421,9 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	} else
 		request_count = blk_plug_queued_count(q);

-	wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
-
 	rq = blk_mq_map_request(q, bio, &data);
-	if (unlikely(!rq)) {
-		__wbt_done(q->rq_wb, wb_acct);
+	if (unlikely(!rq))
 		return BLK_QC_T_NONE;
-	}
-
-	wbt_track(&rq->wb_stat, wb_acct);

 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1838,8 +1807,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
-		blk_stat_init(&__ctx->stat[0]);
-		blk_stat_init(&__ctx->stat[1]);

 		/* If the cpu isn't online, the cpu is mapped to first hctx */
 		if (!cpu_online(i))
@@ -2178,9 +2145,6 @@ void blk_mq_free_queue(struct request_queue *q)
 	list_del_init(&q->all_q_node);
 	mutex_unlock(&all_q_mutex);

-	wbt_exit(q->rq_wb);
-	q->rq_wb = NULL;
-
 	blk_mq_del_queue_tag_set(q);

 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e107f700f..9087b1103 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -1,8 +1,6 @@
 #ifndef INT_BLK_MQ_H
 #define INT_BLK_MQ_H

-#include "blk-stat.h"
-
 struct blk_mq_tag_set;

 struct blk_mq_ctx {
@@ -22,7 +20,6 @@ struct blk_mq_ctx {

 	/* incremented at completion time */
 	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
-	struct blk_rq_stat	stat[2];

 	struct request_queue	*queue;
 	struct kobject		kobj;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 746dc9fee..f679ae122 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -832,19 +832,6 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

 /**
- * blk_set_queue_depth - tell the block layer about the device queue depth
- * @q:		the request queue for the device
- * @depth:	queue depth
- *
- */
-void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
-{
-	q->queue_depth = depth;
-	wbt_set_queue_depth(q->rq_wb, depth);
-}
-EXPORT_SYMBOL(blk_set_queue_depth);
-
-/**
  * blk_queue_write_cache - configure queue's write cache
  * @q:		the request queue for the device
  * @wc:		write back cache on or off
@@ -864,8 +851,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 	else
 		queue_flag_clear(QUEUE_FLAG_FUA, q);
 	spin_unlock_irq(q->queue_lock);
-
-	wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 }
 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
deleted file mode 100644
index bdb16d84b..000000000
--- a/block/blk-stat.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Block stat tracking code
- *
- * Copyright (C) 2016 Jens Axboe
- */
-#include <linux/kernel.h>
-#include <linux/blk-mq.h>
-
-#include "blk-stat.h"
-#include "blk-mq.h"
-
-static void blk_stat_flush_batch(struct blk_rq_stat *stat)
-{
-	if (!stat->nr_batch)
-		return;
-	if (!stat->nr_samples)
-		stat->mean = div64_s64(stat->batch, stat->nr_batch);
-	else {
-		stat->mean = div64_s64((stat->mean * stat->nr_samples) +
-					stat->batch,
-					stat->nr_samples + stat->nr_batch);
-	}
-
-	stat->nr_samples += stat->nr_batch;
-	stat->nr_batch = stat->batch = 0;
-}
-
-void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
-{
-	if (!src->nr_samples)
-		return;
-
-	blk_stat_flush_batch(src);
-
-	dst->min = min(dst->min, src->min);
-	dst->max = max(dst->max, src->max);
-
-	if (!dst->nr_samples)
-		dst->mean = src->mean;
-	else {
-		dst->mean = div64_s64((src->mean * src->nr_samples) +
-					(dst->mean * dst->nr_samples),
-					dst->nr_samples + src->nr_samples);
-	}
-	dst->nr_samples += src->nr_samples;
-}
-
-static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
-{
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
-	uint64_t latest = 0;
-	int i, j, nr;
-
-	blk_stat_init(&dst[0]);
-	blk_stat_init(&dst[1]);
-
-	nr = 0;
-	do {
-		uint64_t newest = 0;
-
-		queue_for_each_hw_ctx(q, hctx, i) {
-			hctx_for_each_ctx(hctx, ctx, j) {
-				if (!ctx->stat[0].nr_samples &&
-				    !ctx->stat[1].nr_samples)
-					continue;
-				if (ctx->stat[0].time > newest)
-					newest = ctx->stat[0].time;
-				if (ctx->stat[1].time > newest)
-					newest = ctx->stat[1].time;
-			}
-		}
-
-		/*
-		 * No samples
-		 */
-		if (!newest)
-			break;
-
-		if (newest > latest)
-			latest = newest;
-
-		queue_for_each_hw_ctx(q, hctx, i) {
-			hctx_for_each_ctx(hctx, ctx, j) {
-				if (ctx->stat[0].time == newest) {
-					blk_stat_sum(&dst[0], &ctx->stat[0]);
-					nr++;
-				}
-				if (ctx->stat[1].time == newest) {
-					blk_stat_sum(&dst[1], &ctx->stat[1]);
-					nr++;
-				}
-			}
-		}
-		/*
-		 * If we race on finding an entry, just loop back again.
-		 * Should be very rare.
-		 */
-	} while (!nr);
-
-	dst[0].time = dst[1].time = latest;
-}
-
-void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
-{
-	if (q->mq_ops)
-		blk_mq_stat_get(q, dst);
-	else {
-		memcpy(&dst[0], &q->rq_stats[0], sizeof(struct blk_rq_stat));
-		memcpy(&dst[1], &q->rq_stats[1], sizeof(struct blk_rq_stat));
-	}
-}
-
-void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
-{
-	struct blk_mq_ctx *ctx;
-	unsigned int i, nr;
-
-	nr = 0;
-	do {
-		uint64_t newest = 0;
-
-		hctx_for_each_ctx(hctx, ctx, i) {
-			if (!ctx->stat[0].nr_samples &&
-			    !ctx->stat[1].nr_samples)
-				continue;
-
-			if (ctx->stat[0].time > newest)
-				newest = ctx->stat[0].time;
-			if (ctx->stat[1].time > newest)
-				newest = ctx->stat[1].time;
-		}
-
-		if (!newest)
-			break;
-
-		hctx_for_each_ctx(hctx, ctx, i) {
-			if (ctx->stat[0].time == newest) {
-				blk_stat_sum(&dst[0], &ctx->stat[0]);
-				nr++;
-			}
-			if (ctx->stat[1].time == newest) {
-				blk_stat_sum(&dst[1], &ctx->stat[1]);
-				nr++;
-			}
-		}
-		/*
-		 * If we race on finding an entry, just loop back again.
-		 * Should be very rare, as the window is only updated
-		 * occasionally
-		 */
-	} while (!nr);
-}
-
-static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
-{
-	stat->min = -1ULL;
-	stat->max = stat->nr_samples = stat->mean = 0;
-	stat->batch = stat->nr_batch = 0;
-	stat->time = time_now & BLK_STAT_MASK;
-}
-
-void blk_stat_init(struct blk_rq_stat *stat)
-{
-	__blk_stat_init(stat, ktime_to_ns(ktime_get()));
-}
-
-static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
-{
-	return (now & BLK_STAT_MASK) == (stat->time & BLK_STAT_MASK);
-}
-
-bool blk_stat_is_current(struct blk_rq_stat *stat)
-{
-	return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
-}
-
-void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
-{
-	s64 now, value;
-	u64 rq_time = wbt_issue_stat_get_time(&rq->wb_stat);
-
-	now = ktime_to_ns(ktime_get());
-	if (now < rq_time)
-		return;
-
-	if (!__blk_stat_is_current(stat, now))
-		__blk_stat_init(stat, now);
-
-	value = now - rq_time;
-	if (value > stat->max)
-		stat->max = value;
-	if (value < stat->min)
-		stat->min = value;
-
-	if (stat->batch + value < stat->batch ||
-	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
-		blk_stat_flush_batch(stat);
-
-	stat->batch += value;
-	stat->nr_batch++;
-}
-
-void blk_stat_clear(struct request_queue *q)
-{
-	if (q->mq_ops) {
-		struct blk_mq_hw_ctx *hctx;
-		struct blk_mq_ctx *ctx;
-		int i, j;
-
-		queue_for_each_hw_ctx(q, hctx, i) {
-			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_init(&ctx->stat[0]);
-				blk_stat_init(&ctx->stat[1]);
-			}
-		}
-	} else {
-		blk_stat_init(&q->rq_stats[0]);
-		blk_stat_init(&q->rq_stats[1]);
-	}
-}
diff --git a/block/blk-stat.h b/block/blk-stat.h
deleted file mode 100644
index 376a6ccd9..000000000
--- a/block/blk-stat.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef BLK_STAT_H
-#define BLK_STAT_H
-
-/*
- * ~0.13s window as a power-of-2 (2^27 nsecs)
- */
-#define BLK_STAT_NSEC		134217728ULL
-#define BLK_STAT_MASK		~(BLK_STAT_NSEC - 1)
-
-void blk_stat_add(struct blk_rq_stat *, struct request *);
-void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
-void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
-void blk_stat_clear(struct request_queue *q);
-void blk_stat_init(struct blk_rq_stat *);
-void blk_stat_sum(struct blk_rq_stat *, struct blk_rq_stat *);
-bool blk_stat_is_current(struct blk_rq_stat *);
-
-#endif
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 85c3dc223..f87a7e747 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -10,7 +10,6 @@
 #include <linux/blktrace_api.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-cgroup.h>
-#include <linux/wbt.h>

 #include "blk.h"
 #include "blk-mq.h"
@@ -42,19 +41,6 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }

-static ssize_t queue_var_store64(u64 *var, const char *page)
-{
-	int err;
-	u64 v;
-
-	err = kstrtou64(page, 10, &v);
-	if (err < 0)
-		return err;
-
-	*var = v;
-	return 0;
-}
-
 static ssize_t queue_requests_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(q->nr_requests, (page));
@@ -361,58 +347,6 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	return ret;
 }

-static ssize_t queue_wb_win_show(struct request_queue *q, char *page)
-{
-	if (!q->rq_wb)
-		return -EINVAL;
-
-	return sprintf(page, "%llu\n", div_u64(q->rq_wb->win_nsec, 1000));
-}
-
-static ssize_t queue_wb_win_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	ssize_t ret;
-	u64 val;
-
-	if (!q->rq_wb)
-		return -EINVAL;
-
-	ret = queue_var_store64(&val, page);
-	if (ret < 0)
-		return ret;
-
-	q->rq_wb->win_nsec = val * 1000ULL;
-	wbt_update_limits(q->rq_wb);
-	return count;
-}
-
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-{
-	if (!q->rq_wb)
-		return -EINVAL;
-
-	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
-}
-
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	ssize_t ret;
-	u64 val;
-
-	if (!q->rq_wb)
-		return -EINVAL;
-
-	ret = queue_var_store64(&val, page);
-	if (ret < 0)
-		return ret;
-
-	q->rq_wb->min_lat_nsec = val * 1000ULL;
-	wbt_update_limits(q->rq_wb);
-	return count;
-}
-
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -450,26 +384,6 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
 	return queue_var_show(blk_queue_dax(q), page);
 }

-static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
-{
-	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
-			pre, (long long) stat->nr_samples,
-			(long long) stat->mean, (long long) stat->min,
-			(long long) stat->max);
-}
-
-static ssize_t queue_stats_show(struct request_queue *q, char *page)
-{
-	struct blk_rq_stat stat[2];
-	ssize_t ret;
-
-	blk_queue_stat_get(q, stat);
-
-	ret = print_stat(page, &stat[0], "read :");
-	ret += print_stat(page + ret, &stat[1], "write:");
-	return ret;
-}
-
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -612,23 +526,6 @@ static struct queue_sysfs_entry queue_dax_entry = {
 	.show = queue_dax_show,
 };

-static struct queue_sysfs_entry queue_stats_entry = {
-	.attr = {.name = "stats", .mode = S_IRUGO },
-	.show = queue_stats_show,
-};
-
-static struct queue_sysfs_entry queue_wb_lat_entry = {
-	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_wb_lat_show,
-	.store = queue_wb_lat_store,
-};
-
-static struct queue_sysfs_entry queue_wb_win_entry = {
-	.attr = {.name = "wbt_window_usec", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_wb_win_show,
-	.store = queue_wb_win_store,
-};
-
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -656,9 +553,6 @@ static struct attribute *default_attrs[] = {
 	&queue_poll_entry.attr,
 	&queue_wc_entry.attr,
 	&queue_dax_entry.attr,
-	&queue_stats_entry.attr,
-	&queue_wb_lat_entry.attr,
-	&queue_wb_win_entry.attr,
 	NULL,
 };
@@ -773,49 +667,6 @@ struct kobj_type blk_queue_ktype = {
 	.release = blk_release_queue,
 };

-static void blk_wb_stat_get(void *data, struct blk_rq_stat *stat)
-{
-	blk_queue_stat_get(data, stat);
-}
-
-static void blk_wb_stat_clear(void *data)
-{
-	blk_stat_clear(data);
-}
-
-static bool blk_wb_stat_is_current(struct blk_rq_stat *stat)
-{
-	return blk_stat_is_current(stat);
-}
-
-static struct wb_stat_ops wb_stat_ops = {
-	.get		= blk_wb_stat_get,
-	.is_current	= blk_wb_stat_is_current,
-	.clear		= blk_wb_stat_clear,
-};
-
-static void blk_wb_init(struct request_queue *q)
-{
-	struct rq_wb *rwb;
-
-	rwb = wbt_init(&q->backing_dev_info, &wb_stat_ops, q);
-
-	/*
-	 * If this fails, we don't get throttling
-	 */
-	if (IS_ERR(rwb))
-		return;
-
-	if (blk_queue_nonrot(q))
-		rwb->min_lat_nsec = 2000000ULL;
-	else
-		rwb->min_lat_nsec = 75000000ULL;
-
-	wbt_set_queue_depth(rwb, blk_queue_depth(q));
-	wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
-	q->rq_wb = rwb;
-}
-
 int blk_register_queue(struct gendisk *disk)
 {
 	int ret;
@@ -855,8 +706,6 @@ int blk_register_queue(struct gendisk *disk)
 	if (q->mq_ops)
 		blk_mq_register_disk(disk);

-	blk_wb_init(q);
-
 	if (!q->request_fn)
 		return 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f336dcbed..5e24d8803 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3771,11 +3771,9 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 	uint64_t serial_nr;
-	bool nonroot_cg;

 	rcu_read_lock();
 	serial_nr = bio_blkcg(bio)->css.serial_nr;
-	nonroot_cg = bio_blkcg(bio) != &blkcg_root;
 	rcu_read_unlock();

 	/*
@@ -3786,17 +3784,6 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 		return;

 	/*
-	 * If we have a non-root cgroup, we can depend on that to
-	 * do proper throttling of writes. Turn off wbt for that
-	 * case.
-	 */
-	if (nonroot_cg) {
-		struct request_queue *q = cfqd->queue;
-
-		wbt_disable(q->rq_wb);
-	}
-
-	/*
 	 * Drop reference to queues. New queues will be assigned in new
 	 * group upon arrival of fresh requests.
 	 */