From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Fabian=20Silva=20Delgado?= Date: Wed, 5 Aug 2015 17:04:01 -0300 Subject: Initial import --- block/Kconfig | 127 ++ block/Kconfig.iosched | 100 + block/Makefile | 26 + block/bfq-cgroup.c | 936 +++++++++ block/bfq-ioc.c | 36 + block/bfq-iosched.c | 4218 +++++++++++++++++++++++++++++++++++++++ block/bfq-sched.c | 1180 +++++++++++ block/bfq.h | 807 ++++++++ block/bio-integrity.c | 517 +++++ block/bio.c | 2079 ++++++++++++++++++++ block/blk-cgroup.c | 1160 +++++++++++ block/blk-cgroup.h | 603 ++++++ block/blk-core.c | 3371 ++++++++++++++++++++++++++++++++ block/blk-exec.c | 143 ++ block/blk-flush.c | 529 +++++ block/blk-integrity.c | 483 +++++ block/blk-ioc.c | 407 ++++ block/blk-iopoll.c | 224 +++ block/blk-lib.c | 317 +++ block/blk-map.c | 220 +++ block/blk-merge.c | 610 ++++++ block/blk-mq-cpu.c | 67 + block/blk-mq-cpumap.c | 119 ++ block/blk-mq-sysfs.c | 462 +++++ block/blk-mq-tag.c | 666 +++++++ block/blk-mq-tag.h | 91 + block/blk-mq.c | 2286 ++++++++++++++++++++++ block/blk-mq.h | 126 ++ block/blk-settings.c | 861 ++++++++ block/blk-softirq.c | 186 ++ block/blk-sysfs.c | 612 ++++++ block/blk-tag.c | 402 ++++ block/blk-throttle.c | 1699 ++++++++++++++++ block/blk-timeout.c | 235 +++ block/blk.h | 284 +++ block/bounce.c | 290 +++ block/bsg-lib.c | 232 +++ block/bsg.c | 1097 +++++++++++ block/cfq-iosched.c | 4671 ++++++++++++++++++++++++++++++++++++++++++++ block/cmdline-parser.c | 254 +++ block/compat_ioctl.c | 756 +++++++ block/deadline-iosched.c | 476 +++++ block/elevator.c | 1047 ++++++++++ block/genhd.c | 1926 ++++++++++++++++++ block/ioctl.c | 430 ++++ block/ioprio.c | 243 +++ block/noop-iosched.c | 124 ++ block/partition-generic.c | 571 ++++++ block/partitions/Kconfig | 269 +++ block/partitions/Makefile | 22 + block/partitions/acorn.c | 556 ++++++ block/partitions/acorn.h | 14 + block/partitions/aix.c | 293 +++ block/partitions/aix.h | 1 + block/partitions/amiga.c | 141 ++ block/partitions/amiga.h | 6 + block/partitions/atari.c | 149 ++ block/partitions/atari.h | 36 + block/partitions/check.c | 197 ++ block/partitions/check.h | 54 + block/partitions/cmdline.c | 99 + block/partitions/cmdline.h | 2 + block/partitions/efi.c | 737 +++++++ block/partitions/efi.h | 133 ++ block/partitions/ibm.c | 364 ++++ block/partitions/ibm.h | 1 + block/partitions/karma.c | 58 + block/partitions/karma.h | 8 + block/partitions/ldm.c | 1567 +++++++++++++++ block/partitions/ldm.h | 215 ++ block/partitions/mac.c | 138 ++ block/partitions/mac.h | 44 + block/partitions/msdos.c | 582 ++++++ block/partitions/msdos.h | 8 + block/partitions/osf.c | 86 + block/partitions/osf.h | 7 + block/partitions/sgi.c | 82 + block/partitions/sgi.h | 8 + block/partitions/sun.c | 122 ++ block/partitions/sun.h | 8 + block/partitions/sysv68.c | 95 + block/partitions/sysv68.h | 1 + block/partitions/ultrix.c | 48 + block/partitions/ultrix.h | 5 + block/scsi_ioctl.c | 748 +++++++ block/t10-pi.c | 197 ++ block/uuid.c | 509 +++++ 87 files changed, 45916 insertions(+) create mode 100644 block/Kconfig create mode 100644 block/Kconfig.iosched create mode 100644 block/Makefile create mode 100644 block/bfq-cgroup.c create mode 100644 block/bfq-ioc.c create mode 100644 block/bfq-iosched.c create mode 100644 block/bfq-sched.c create mode 100644 block/bfq.h create mode 100644 block/bio-integrity.c create mode 100644 block/bio.c create mode 100644 block/blk-cgroup.c create mode 100644 block/blk-cgroup.h create mode 100644 block/blk-core.c create 
mode 100644 block/blk-exec.c create mode 100644 block/blk-flush.c create mode 100644 block/blk-integrity.c create mode 100644 block/blk-ioc.c create mode 100644 block/blk-iopoll.c create mode 100644 block/blk-lib.c create mode 100644 block/blk-map.c create mode 100644 block/blk-merge.c create mode 100644 block/blk-mq-cpu.c create mode 100644 block/blk-mq-cpumap.c create mode 100644 block/blk-mq-sysfs.c create mode 100644 block/blk-mq-tag.c create mode 100644 block/blk-mq-tag.h create mode 100644 block/blk-mq.c create mode 100644 block/blk-mq.h create mode 100644 block/blk-settings.c create mode 100644 block/blk-softirq.c create mode 100644 block/blk-sysfs.c create mode 100644 block/blk-tag.c create mode 100644 block/blk-throttle.c create mode 100644 block/blk-timeout.c create mode 100644 block/blk.h create mode 100644 block/bounce.c create mode 100644 block/bsg-lib.c create mode 100644 block/bsg.c create mode 100644 block/cfq-iosched.c create mode 100644 block/cmdline-parser.c create mode 100644 block/compat_ioctl.c create mode 100644 block/deadline-iosched.c create mode 100644 block/elevator.c create mode 100644 block/genhd.c create mode 100644 block/ioctl.c create mode 100644 block/ioprio.c create mode 100644 block/noop-iosched.c create mode 100644 block/partition-generic.c create mode 100644 block/partitions/Kconfig create mode 100644 block/partitions/Makefile create mode 100644 block/partitions/acorn.c create mode 100644 block/partitions/acorn.h create mode 100644 block/partitions/aix.c create mode 100644 block/partitions/aix.h create mode 100644 block/partitions/amiga.c create mode 100644 block/partitions/amiga.h create mode 100644 block/partitions/atari.c create mode 100644 block/partitions/atari.h create mode 100644 block/partitions/check.c create mode 100644 block/partitions/check.h create mode 100644 block/partitions/cmdline.c create mode 100644 block/partitions/cmdline.h create mode 100644 block/partitions/efi.c create mode 100644 block/partitions/efi.h create mode 100644 block/partitions/ibm.c create mode 100644 block/partitions/ibm.h create mode 100644 block/partitions/karma.c create mode 100644 block/partitions/karma.h create mode 100644 block/partitions/ldm.c create mode 100644 block/partitions/ldm.h create mode 100644 block/partitions/mac.c create mode 100644 block/partitions/mac.h create mode 100644 block/partitions/msdos.c create mode 100644 block/partitions/msdos.h create mode 100644 block/partitions/osf.c create mode 100644 block/partitions/osf.h create mode 100644 block/partitions/sgi.c create mode 100644 block/partitions/sgi.h create mode 100644 block/partitions/sun.c create mode 100644 block/partitions/sun.h create mode 100644 block/partitions/sysv68.c create mode 100644 block/partitions/sysv68.h create mode 100644 block/partitions/ultrix.c create mode 100644 block/partitions/ultrix.h create mode 100644 block/scsi_ioctl.c create mode 100644 block/t10-pi.c create mode 100644 block/uuid.c (limited to 'block') diff --git a/block/Kconfig b/block/Kconfig new file mode 100644 index 000000000..161491d0a --- /dev/null +++ b/block/Kconfig @@ -0,0 +1,127 @@ +# +# Block layer core configuration +# +menuconfig BLOCK + bool "Enable the block layer" if EXPERT + default y + help + Provide block layer support for the kernel. + + Disable this option to remove the block layer support from the + kernel. This may be useful for embedded devices. + + If this option is disabled: + + - block device files will become unusable + - some filesystems (such as ext3) will become unavailable. 
+ + Also, SCSI character devices and USB storage will be disabled since + they make use of various block layer definitions and facilities. + + Say Y here unless you know you really don't want to mount disks and + suchlike. + +if BLOCK + +config LBDAF + bool "Support for large (2TB+) block devices and files" + depends on !64BIT + default y + help + Enable block devices or files of size 2TB and larger. + + This option is required to support the full capacity of large + (2TB+) block devices, including RAID, disk, Network Block Device, + Logical Volume Manager (LVM) and loopback. + + This option also enables support for single files larger than + 2TB. + + The ext4 filesystem requires that this feature be enabled in + order to support filesystems that have the huge_file feature + enabled. Otherwise, it will refuse to mount in the read-write + mode any filesystems that use the huge_file feature, which is + enabled by default by mke2fs.ext4. + + The GFS2 filesystem also requires this feature. + + If unsure, say Y. + +config BLK_DEV_BSG + bool "Block layer SG support v4" + default y + help + Saying Y here will enable generic SG (SCSI generic) v4 support + for any block device. + + Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4 + can handle complicated SCSI commands: tagged variable length cdbs + with bidirectional data transfers and generic request/response + protocols (e.g. Task Management Functions and SMP in Serial + Attached SCSI). + + This option is required by recent UDEV versions to properly + access device serial numbers, etc. + + If unsure, say Y. + +config BLK_DEV_BSGLIB + bool "Block layer SG support v4 helper lib" + default n + select BLK_DEV_BSG + help + Subsystems will normally enable this if needed. Users will not + normally need to manually enable this. + + If unsure, say N. + +config BLK_DEV_INTEGRITY + bool "Block layer data integrity support" + select CRC_T10DIF if BLK_DEV_INTEGRITY + ---help--- + Some storage devices allow extra information to be + stored/retrieved to help protect the data. The block layer + data integrity option provides hooks which can be used by + filesystems to ensure better data integrity. + + Say yes here if you have a storage device that provides the + T10/SCSI Data Integrity Field or the T13/ATA External Path + Protection. If in doubt, say N. + +config BLK_DEV_THROTTLING + bool "Block layer bio throttling support" + depends on BLK_CGROUP=y + default n + ---help--- + Block layer bio throttling support. It can be used to limit + the IO rate to a device. IO rate policies are per cgroup and + one needs to mount and use blkio cgroup controller for creating + cgroups and specifying per device IO rate policies. + + See Documentation/cgroups/blkio-controller.txt for more information. + +config BLK_CMDLINE_PARSER + bool "Block device command line partition parser" + default n + ---help--- + Enabling this option allows you to specify the partition layout from + the kernel boot args. This is typically of use for embedded devices + which don't otherwise have any standardized method for listing the + partitions on a block device. + + See Documentation/block/cmdline-partition.txt for more information. 
+ +menu "Partition Types" + +source "block/partitions/Kconfig" + +endmenu + +endif # BLOCK + +config BLOCK_COMPAT + bool + depends on BLOCK && COMPAT + default y + +source block/Kconfig.iosched diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched new file mode 100644 index 000000000..01da733dc --- /dev/null +++ b/block/Kconfig.iosched @@ -0,0 +1,100 @@ +if BLOCK + +menu "IO Schedulers" + +config IOSCHED_NOOP + bool + default y + ---help--- + The no-op I/O scheduler is a minimal scheduler that does basic merging + and sorting. Its main uses include non-disk based block devices like + memory devices, and specialised software or hardware environments + that do their own scheduling and require only minimal assistance from + the kernel. + +config IOSCHED_DEADLINE + tristate "Deadline I/O scheduler" + default y + ---help--- + The deadline I/O scheduler is simple and compact. It will provide + CSCAN service with FIFO expiration of requests, switching to + a new point in the service tree and doing a batch of IO from there + in case of expiry. + +config IOSCHED_CFQ + tristate "CFQ I/O scheduler" + default y + ---help--- + The CFQ I/O scheduler tries to distribute bandwidth equally + among all processes in the system. It should provide a fair + and low latency working environment, suitable for both desktop + and server systems. + + This is the default I/O scheduler. + +config CFQ_GROUP_IOSCHED + bool "CFQ Group Scheduling support" + depends on IOSCHED_CFQ && BLK_CGROUP + default n + ---help--- + Enable group IO scheduling in CFQ. + +config IOSCHED_BFQ + tristate "BFQ I/O scheduler" + default y + ---help--- + The BFQ I/O scheduler tries to distribute bandwidth among + all processes according to their weights. + It aims at distributing the bandwidth as desired, independently of + the disk parameters and with any workload. It also tries to + guarantee low latency to interactive and soft real-time + applications. If compiled built-in (saying Y here), BFQ can + be configured to support hierarchical scheduling. + +config CGROUP_BFQIO + bool "BFQ hierarchical scheduling support" + depends on CGROUPS && IOSCHED_BFQ=y + default n + ---help--- + Enable hierarchical scheduling in BFQ, using the cgroups + filesystem interface. The name of the subsystem will be + bfqio. + +choice + prompt "Default I/O scheduler" + default DEFAULT_BFQ + help + Select the I/O scheduler which will be used by default for all + block devices. + + config DEFAULT_DEADLINE + bool "Deadline" if IOSCHED_DEADLINE=y + + config DEFAULT_CFQ + bool "CFQ" if IOSCHED_CFQ=y + + config DEFAULT_BFQ + bool "BFQ" if IOSCHED_BFQ=y + help + Selects BFQ as the default I/O scheduler which will be + used by default for all block devices. + The BFQ I/O scheduler aims at distributing the bandwidth + as desired, independently of the disk parameters and with + any workload. It also tries to guarantee low latency to + interactive and soft real-time applications. 
+ + config DEFAULT_NOOP + bool "No-op" + +endchoice + +config DEFAULT_IOSCHED + string + default "deadline" if DEFAULT_DEADLINE + default "cfq" if DEFAULT_CFQ + default "bfq" if DEFAULT_BFQ + default "noop" if DEFAULT_NOOP + +endmenu + +endif diff --git a/block/Makefile b/block/Makefile new file mode 100644 index 000000000..086be5007 --- /dev/null +++ b/block/Makefile @@ -0,0 +1,26 @@ +# +# Makefile for the kernel block layer +# + +obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ + blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ + blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ + blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \ + blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ + uuid.o genhd.o scsi_ioctl.o partition-generic.o ioprio.o \ + partitions/ + +obj-$(CONFIG_BOUNCE) += bounce.o +obj-$(CONFIG_BLK_DEV_BSG) += bsg.o +obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o +obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o +obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o +obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o +obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o +obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o +obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o + +obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o +obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o +obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o + diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c new file mode 100644 index 000000000..11e2f1d4e --- /dev/null +++ b/block/bfq-cgroup.c @@ -0,0 +1,936 @@ +/* + * BFQ: CGROUPS support. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2010 Paolo Valente + * + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ + * file. + */ + +#ifdef CONFIG_CGROUP_BFQIO + +static DEFINE_MUTEX(bfqio_mutex); + +static bool bfqio_is_removed(struct bfqio_cgroup *bgrp) +{ + return bgrp ? !bgrp->online : false; +} + +static struct bfqio_cgroup bfqio_root_cgroup = { + .weight = BFQ_DEFAULT_GRP_WEIGHT, + .ioprio = BFQ_DEFAULT_GRP_IOPRIO, + .ioprio_class = BFQ_DEFAULT_GRP_CLASS, +}; + +static inline void bfq_init_entity(struct bfq_entity *entity, + struct bfq_group *bfqg) +{ + entity->weight = entity->new_weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio; + entity->ioprio_class = entity->new_ioprio_class; + entity->parent = bfqg->my_entity; + entity->sched_data = &bfqg->sched_data; +} + +static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct bfqio_cgroup, css) : NULL; +} + +/* + * Search the bfq_group for bfqd into the hash table (by now only a list) + * of bgrp. Must be called under rcu_read_lock(). + */ +static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp, + struct bfq_data *bfqd) +{ + struct bfq_group *bfqg; + void *key; + + hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) { + key = rcu_dereference(bfqg->bfqd); + if (key == bfqd) + return bfqg; + } + + return NULL; +} + +static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp, + struct bfq_group *bfqg) +{ + struct bfq_entity *entity = &bfqg->entity; + + /* + * If the weight of the entity has never been set via the sysfs + * interface, then bgrp->weight == 0. In this case we initialize + * the weight from the current ioprio value. Otherwise, the group + * weight, if set, has priority over the ioprio value. 
+ */ + if (bgrp->weight == 0) { + entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio); + entity->new_ioprio = bgrp->ioprio; + } else { + if (bgrp->weight < BFQ_MIN_WEIGHT || + bgrp->weight > BFQ_MAX_WEIGHT) { + printk(KERN_CRIT "bfq_group_init_entity: " + "bgrp->weight %d\n", bgrp->weight); + BUG(); + } + entity->new_weight = bgrp->weight; + entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight); + } + entity->orig_weight = entity->weight = entity->new_weight; + entity->ioprio = entity->new_ioprio; + entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class; + entity->my_sched_data = &bfqg->sched_data; + bfqg->active_entities = 0; +} + +static inline void bfq_group_set_parent(struct bfq_group *bfqg, + struct bfq_group *parent) +{ + struct bfq_entity *entity; + + BUG_ON(parent == NULL); + BUG_ON(bfqg == NULL); + + entity = &bfqg->entity; + entity->parent = parent->my_entity; + entity->sched_data = &parent->sched_data; +} + +/** + * bfq_group_chain_alloc - allocate a chain of groups. + * @bfqd: queue descriptor. + * @css: the leaf cgroup_subsys_state this chain starts from. + * + * Allocate a chain of groups starting from the one belonging to + * @cgroup up to the root cgroup. Stop if a cgroup on the chain + * to the root already has an allocated group on @bfqd. + */ +static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd, + struct cgroup_subsys_state *css) +{ + struct bfqio_cgroup *bgrp; + struct bfq_group *bfqg, *prev = NULL, *leaf = NULL; + + for (; css != NULL; css = css->parent) { + bgrp = css_to_bfqio(css); + + bfqg = bfqio_lookup_group(bgrp, bfqd); + if (bfqg != NULL) { + /* + * All the cgroups in the path from there to the + * root must have a bfq_group for bfqd, so we don't + * need any more allocations. + */ + break; + } + + bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC); + if (bfqg == NULL) + goto cleanup; + + bfq_group_init_entity(bgrp, bfqg); + bfqg->my_entity = &bfqg->entity; + + if (leaf == NULL) { + leaf = bfqg; + prev = leaf; + } else { + bfq_group_set_parent(prev, bfqg); + /* + * Build a list of allocated nodes using the bfqd + * field, which is still unused and will be + * initialized only after the node is + * connected. + */ + prev->bfqd = bfqg; + prev = bfqg; + } + } + + return leaf; + +cleanup: + while (leaf != NULL) { + prev = leaf; + leaf = leaf->bfqd; + kfree(prev); + } + + return NULL; +} + +/** + * bfq_group_chain_link - link an allocated group chain to a cgroup + * hierarchy. + * @bfqd: the queue descriptor. + * @css: the leaf cgroup_subsys_state to start from. + * @leaf: the leaf group (to be associated to @cgroup). + * + * Try to link a chain of groups to a cgroup hierarchy, connecting the + * nodes bottom-up, so we can be sure that when we find a cgroup in the + * hierarchy that already has a group associated to @bfqd, all the nodes + * in the path to the root cgroup have one too. + * + * On locking: the queue lock protects the hierarchy (there is a hierarchy + * per device) while the bfqio_cgroup lock protects the list of groups + * belonging to the same cgroup.
+ */ +static void bfq_group_chain_link(struct bfq_data *bfqd, + struct cgroup_subsys_state *css, + struct bfq_group *leaf) +{ + struct bfqio_cgroup *bgrp; + struct bfq_group *bfqg, *next, *prev = NULL; + unsigned long flags; + + assert_spin_locked(bfqd->queue->queue_lock); + + for (; css != NULL && leaf != NULL; css = css->parent) { + bgrp = css_to_bfqio(css); + next = leaf->bfqd; + + bfqg = bfqio_lookup_group(bgrp, bfqd); + BUG_ON(bfqg != NULL); + + spin_lock_irqsave(&bgrp->lock, flags); + + rcu_assign_pointer(leaf->bfqd, bfqd); + hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data); + hlist_add_head(&leaf->bfqd_node, &bfqd->group_list); + + spin_unlock_irqrestore(&bgrp->lock, flags); + + prev = leaf; + leaf = next; + } + + BUG_ON(css == NULL && leaf != NULL); + if (css != NULL && prev != NULL) { + bgrp = css_to_bfqio(css); + bfqg = bfqio_lookup_group(bgrp, bfqd); + bfq_group_set_parent(prev, bfqg); + } +} + +/** + * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup. + * @bfqd: queue descriptor. + * @cgroup: cgroup being searched for. + * + * Return a group associated to @bfqd in @cgroup, allocating one if + * necessary. When a group is returned all the cgroups in the path + * to the root have a group associated to @bfqd. + * + * If the allocation fails, return the root group: this breaks guarantees + * but is a safe fallback. If this loss becomes a problem it can be + * mitigated using the equivalent weight (given by the product of the + * weights of the groups in the path from @group to the root) in the + * root scheduler. + * + * We allocate all the missing nodes in the path from the leaf cgroup + * to the root and we connect the nodes only after all the allocations + * have been successful. + */ +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, + struct cgroup_subsys_state *css) +{ + struct bfqio_cgroup *bgrp = css_to_bfqio(css); + struct bfq_group *bfqg; + + bfqg = bfqio_lookup_group(bgrp, bfqd); + if (bfqg != NULL) + return bfqg; + + bfqg = bfq_group_chain_alloc(bfqd, css); + if (bfqg != NULL) + bfq_group_chain_link(bfqd, css, bfqg); + else + bfqg = bfqd->root_group; + + return bfqg; +} + +/** + * bfq_bfqq_move - migrate @bfqq to @bfqg. + * @bfqd: queue descriptor. + * @bfqq: the queue to move. + * @entity: @bfqq's entity. + * @bfqg: the group to move to. + * + * Move @bfqq to @bfqg, deactivating it from its old group and reactivating + * it on the new one. Avoid putting the entity on the old group idle tree. + * + * Must be called under the queue lock; the cgroup owning @bfqg must + * not disappear (by now this just means that we are called under + * rcu_read_lock()). + */ +static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct bfq_entity *entity, struct bfq_group *bfqg) +{ + int busy, resume; + + busy = bfq_bfqq_busy(bfqq); + resume = !RB_EMPTY_ROOT(&bfqq->sort_list); + + BUG_ON(resume && !entity->on_st); + BUG_ON(busy && !resume && entity->on_st && + bfqq != bfqd->in_service_queue); + + if (busy) { + BUG_ON(atomic_read(&bfqq->ref) < 2); + + if (!resume) + bfq_del_bfqq_busy(bfqd, bfqq, 0); + else + bfq_deactivate_bfqq(bfqd, bfqq, 0); + } else if (entity->on_st) + bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); + + /* + * Here we use a reference to bfqg. We don't need a refcounter + * as the cgroup reference will not be dropped, so that its + * destroy() callback will not be invoked. 
+ */ + entity->parent = bfqg->my_entity; + entity->sched_data = &bfqg->sched_data; + + if (busy && resume) + bfq_activate_bfqq(bfqd, bfqq); + + if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); +} + +/** + * __bfq_bic_change_cgroup - move @bic to @cgroup. + * @bfqd: the queue descriptor. + * @bic: the bic to move. + * @cgroup: the cgroup to move to. + * + * Move bic to cgroup, assuming that bfqd->queue is locked; the caller + * has to make sure that the reference to cgroup is valid across the call. + * + * NOTE: an alternative approach might have been to store the current + * cgroup in bfqq and getting a reference to it, reducing the lookup + * time here, at the price of slightly more complex code. + */ +static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, + struct bfq_io_cq *bic, + struct cgroup_subsys_state *css) +{ + struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); + struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); + struct bfq_entity *entity; + struct bfq_group *bfqg; + struct bfqio_cgroup *bgrp; + + bgrp = css_to_bfqio(css); + + bfqg = bfq_find_alloc_group(bfqd, css); + if (async_bfqq != NULL) { + entity = &async_bfqq->entity; + + if (entity->sched_data != &bfqg->sched_data) { + bic_set_bfqq(bic, NULL, 0); + bfq_log_bfqq(bfqd, async_bfqq, + "bic_change_group: %p %d", + async_bfqq, atomic_read(&async_bfqq->ref)); + bfq_put_queue(async_bfqq); + } + } + + if (sync_bfqq != NULL) { + entity = &sync_bfqq->entity; + if (entity->sched_data != &bfqg->sched_data) + bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg); + } + + return bfqg; +} + +/** + * bfq_bic_change_cgroup - move @bic to @cgroup. + * @bic: the bic being migrated. + * @cgroup: the destination cgroup. + * + * When the task owning @bic is moved to @cgroup, @bic is immediately + * moved into its new parent group. + */ +static void bfq_bic_change_cgroup(struct bfq_io_cq *bic, + struct cgroup_subsys_state *css) +{ + struct bfq_data *bfqd; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), + &flags); + if (bfqd != NULL) { + __bfq_bic_change_cgroup(bfqd, bic, css); + bfq_put_bfqd_unlock(bfqd, &flags); + } +} + +/** + * bfq_bic_update_cgroup - update the cgroup of @bic. + * @bic: the @bic to update. + * + * Make sure that @bic is enqueued in the cgroup of the current task. + * We need this in addition to moving bics during the cgroup attach + * phase because the task owning @bic could be at its first disk + * access or we may end up in the root cgroup as the result of a + * memory allocation failure and here we try to move to the right + * group. + * + * Must be called under the queue lock. It is safe to use the returned + * value even after the rcu_read_unlock() as the migration/destruction + * paths act under the queue lock too. IOW it is impossible to race with + * group migration/destruction and end up with an invalid group as: + * a) here cgroup has not yet been destroyed, nor its destroy callback + * has started execution, as current holds a reference to it, + * b) if it is destroyed after rcu_read_unlock() [after current is + * migrated to a different cgroup] its attach() callback will have + * taken care of remove all the references to the old cgroup data. 
+ */ +static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic) +{ + struct bfq_data *bfqd = bic_to_bfqd(bic); + struct bfq_group *bfqg; + struct cgroup_subsys_state *css; + + BUG_ON(bfqd == NULL); + + rcu_read_lock(); + css = task_css(current, bfqio_cgrp_id); + bfqg = __bfq_bic_change_cgroup(bfqd, bic, css); + rcu_read_unlock(); + + return bfqg; +} + +/** + * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. + * @st: the service tree being flushed. + */ +static inline void bfq_flush_idle_tree(struct bfq_service_tree *st) +{ + struct bfq_entity *entity = st->first_idle; + + for (; entity != NULL; entity = st->first_idle) + __bfq_deactivate_entity(entity, 0); +} + +/** + * bfq_reparent_leaf_entity - move leaf entity to the root_group. + * @bfqd: the device data structure with the root group. + * @entity: the entity to move. + */ +static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + BUG_ON(bfqq == NULL); + bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group); + return; +} + +/** + * bfq_reparent_active_entities - move to the root group all active + * entities. + * @bfqd: the device data structure with the root group. + * @bfqg: the group to move from. + * @st: the service tree with the entities. + * + * Needs queue_lock to be taken and reference to be valid over the call. + */ +static inline void bfq_reparent_active_entities(struct bfq_data *bfqd, + struct bfq_group *bfqg, + struct bfq_service_tree *st) +{ + struct rb_root *active = &st->active; + struct bfq_entity *entity = NULL; + + if (!RB_EMPTY_ROOT(&st->active)) + entity = bfq_entity_of(rb_first(active)); + + for (; entity != NULL; entity = bfq_entity_of(rb_first(active))) + bfq_reparent_leaf_entity(bfqd, entity); + + if (bfqg->sched_data.in_service_entity != NULL) + bfq_reparent_leaf_entity(bfqd, + bfqg->sched_data.in_service_entity); + + return; +} + +/** + * bfq_destroy_group - destroy @bfqg. + * @bgrp: the bfqio_cgroup containing @bfqg. + * @bfqg: the group being destroyed. + * + * Destroy @bfqg, making sure that it is not referenced from its parent. + */ +static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg) +{ + struct bfq_data *bfqd; + struct bfq_service_tree *st; + struct bfq_entity *entity = bfqg->my_entity; + unsigned long uninitialized_var(flags); + int i; + + hlist_del(&bfqg->group_node); + + /* + * Empty all service_trees belonging to this group before + * deactivating the group itself. + */ + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { + st = bfqg->sched_data.service_tree + i; + + /* + * The idle tree may still contain bfq_queues belonging + * to exited task because they never migrated to a different + * cgroup from the one being destroyed now. No one else + * can access them so it's safe to act without any lock. + */ + bfq_flush_idle_tree(st); + + /* + * It may happen that some queues are still active + * (busy) upon group destruction (if the corresponding + * processes have been forced to terminate). We move + * all the leaf entities corresponding to these queues + * to the root_group. + * Also, it may happen that the group has an entity + * in service, which is disconnected from the active + * tree: it must be moved, too. + * There is no need to put the sync queues, as the + * scheduler has taken no reference. 
+ */ + bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags); + if (bfqd != NULL) { + bfq_reparent_active_entities(bfqd, bfqg, st); + bfq_put_bfqd_unlock(bfqd, &flags); + } + BUG_ON(!RB_EMPTY_ROOT(&st->active)); + BUG_ON(!RB_EMPTY_ROOT(&st->idle)); + } + BUG_ON(bfqg->sched_data.next_in_service != NULL); + BUG_ON(bfqg->sched_data.in_service_entity != NULL); + + /* + * We may race with device destruction, take extra care when + * dereferencing bfqg->bfqd. + */ + bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags); + if (bfqd != NULL) { + hlist_del(&bfqg->bfqd_node); + __bfq_deactivate_entity(entity, 0); + bfq_put_async_queues(bfqd, bfqg); + bfq_put_bfqd_unlock(bfqd, &flags); + } + BUG_ON(entity->tree != NULL); + + /* + * No need to defer the kfree() to the end of the RCU grace + * period: we are called from the destroy() callback of our + * cgroup, so we can be sure that no one is a) still using + * this cgroup or b) doing lookups in it. + */ + kfree(bfqg); +} + +static void bfq_end_wr_async(struct bfq_data *bfqd) +{ + struct hlist_node *tmp; + struct bfq_group *bfqg; + + hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) + bfq_end_wr_async_queues(bfqd, bfqg); + bfq_end_wr_async_queues(bfqd, bfqd->root_group); +} + +/** + * bfq_disconnect_groups - disconnect @bfqd from all its groups. + * @bfqd: the device descriptor being exited. + * + * When the device exits we just make sure that no lookup can return + * the now unused group structures. They will be deallocated on cgroup + * destruction. + */ +static void bfq_disconnect_groups(struct bfq_data *bfqd) +{ + struct hlist_node *tmp; + struct bfq_group *bfqg; + + bfq_log(bfqd, "disconnect_groups beginning"); + hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) { + hlist_del(&bfqg->bfqd_node); + + __bfq_deactivate_entity(bfqg->my_entity, 0); + + /* + * Don't remove from the group hash, just set an + * invalid key. No lookups can race with the + * assignment as bfqd is being destroyed; this + * implies also that new elements cannot be added + * to the list. + */ + rcu_assign_pointer(bfqg->bfqd, NULL); + + bfq_log(bfqd, "disconnect_groups: put async for group %p", + bfqg); + bfq_put_async_queues(bfqd, bfqg); + } +} + +static inline void bfq_free_root_group(struct bfq_data *bfqd) +{ + struct bfqio_cgroup *bgrp = &bfqio_root_cgroup; + struct bfq_group *bfqg = bfqd->root_group; + + bfq_put_async_queues(bfqd, bfqg); + + spin_lock_irq(&bgrp->lock); + hlist_del_rcu(&bfqg->group_node); + spin_unlock_irq(&bgrp->lock); + + /* + * No need to synchronize_rcu() here: since the device is gone + * there cannot be any read-side access to its root_group. 
+ */ + kfree(bfqg); +} + +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + struct bfqio_cgroup *bgrp; + int i; + + bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node); + if (bfqg == NULL) + return NULL; + + bfqg->entity.parent = NULL; + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; + + bgrp = &bfqio_root_cgroup; + spin_lock_irq(&bgrp->lock); + rcu_assign_pointer(bfqg->bfqd, bfqd); + hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data); + spin_unlock_irq(&bgrp->lock); + + return bfqg; +} + +#define SHOW_FUNCTION(__VAR) \ +static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \ + struct cftype *cftype) \ +{ \ + struct bfqio_cgroup *bgrp = css_to_bfqio(css); \ + u64 ret = -ENODEV; \ + \ + mutex_lock(&bfqio_mutex); \ + if (bfqio_is_removed(bgrp)) \ + goto out_unlock; \ + \ + spin_lock_irq(&bgrp->lock); \ + ret = bgrp->__VAR; \ + spin_unlock_irq(&bgrp->lock); \ + \ +out_unlock: \ + mutex_unlock(&bfqio_mutex); \ + return ret; \ +} + +SHOW_FUNCTION(weight); +SHOW_FUNCTION(ioprio); +SHOW_FUNCTION(ioprio_class); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__VAR, __MIN, __MAX) \ +static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\ + struct cftype *cftype, \ + u64 val) \ +{ \ + struct bfqio_cgroup *bgrp = css_to_bfqio(css); \ + struct bfq_group *bfqg; \ + int ret = -EINVAL; \ + \ + if (val < (__MIN) || val > (__MAX)) \ + return ret; \ + \ + ret = -ENODEV; \ + mutex_lock(&bfqio_mutex); \ + if (bfqio_is_removed(bgrp)) \ + goto out_unlock; \ + ret = 0; \ + \ + spin_lock_irq(&bgrp->lock); \ + bgrp->__VAR = (unsigned short)val; \ + hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \ + /* \ + * Setting the ioprio_changed flag of the entity \ + * to 1 with new_##__VAR == ##__VAR would re-set \ + * the value of the weight to its ioprio mapping. \ + * Set the flag only if necessary. \ + */ \ + if ((unsigned short)val != bfqg->entity.new_##__VAR) { \ + bfqg->entity.new_##__VAR = (unsigned short)val; \ + /* \ + * Make sure that the above new value has been \ + * stored in bfqg->entity.new_##__VAR before \ + * setting the ioprio_changed flag. In fact, \ + * this flag may be read asynchronously (in \ + * critical sections protected by a different \ + * lock than that held here), and finding this \ + * flag set may cause the execution of the code \ + * for updating parameters whose value may \ + * depend also on bfqg->entity.new_##__VAR (in \ + * __bfq_entity_update_weight_prio). \ + * This barrier makes sure that the new value \ + * of bfqg->entity.new_##__VAR is correctly \ + * seen in that code. 
\ + */ \ + smp_wmb(); \ + bfqg->entity.ioprio_changed = 1; \ + } \ + } \ + spin_unlock_irq(&bgrp->lock); \ + \ +out_unlock: \ + mutex_unlock(&bfqio_mutex); \ + return ret; \ +} + +STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT); +STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1); +STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE); +#undef STORE_FUNCTION + +static struct cftype bfqio_files[] = { + { + .name = "weight", + .read_u64 = bfqio_cgroup_weight_read, + .write_u64 = bfqio_cgroup_weight_write, + }, + { + .name = "ioprio", + .read_u64 = bfqio_cgroup_ioprio_read, + .write_u64 = bfqio_cgroup_ioprio_write, + }, + { + .name = "ioprio_class", + .read_u64 = bfqio_cgroup_ioprio_class_read, + .write_u64 = bfqio_cgroup_ioprio_class_write, + }, + { }, /* terminate */ +}; + +static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state + *parent_css) +{ + struct bfqio_cgroup *bgrp; + + if (parent_css != NULL) { + bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL); + if (bgrp == NULL) + return ERR_PTR(-ENOMEM); + } else + bgrp = &bfqio_root_cgroup; + + spin_lock_init(&bgrp->lock); + INIT_HLIST_HEAD(&bgrp->group_data); + bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO; + bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS; + + return &bgrp->css; +} + +/* + * We cannot support shared io contexts, as we have no means to support + * two tasks with the same ioc in two different groups without major rework + * of the main bic/bfqq data structures. By now we allow a task to change + * its cgroup only if it's the only owner of its ioc; the drawback of this + * behavior is that a group containing a task that forked using CLONE_IO + * will not be destroyed until the tasks sharing the ioc die. + */ +static int bfqio_can_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct io_context *ioc; + int ret = 0; + + cgroup_taskset_for_each(task, tset) { + /* + * task_lock() is needed to avoid races with + * exit_io_context() + */ + task_lock(task); + ioc = task->io_context; + if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1) + /* + * ioc == NULL means that the task is either too + * young or exiting: if it has still no ioc the + * ioc can't be shared, if the task is exiting the + * attach will fail anyway, no matter what we + * return here. + */ + ret = -EINVAL; + task_unlock(task); + if (ret) + break; + } + + return ret; +} + +static void bfqio_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct io_context *ioc; + struct io_cq *icq; + + /* + * IMPORTANT NOTE: The move of more than one process at a time to a + * new group has not yet been tested. + */ + cgroup_taskset_for_each(task, tset) { + ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); + if (ioc) { + /* + * Handle cgroup change here. 
+ */ + rcu_read_lock(); + hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node) + if (!strncmp( + icq->q->elevator->type->elevator_name, + "bfq", ELV_NAME_MAX)) + bfq_bic_change_cgroup(icq_to_bic(icq), + css); + rcu_read_unlock(); + put_io_context(ioc); + } + } +} + +static void bfqio_destroy(struct cgroup_subsys_state *css) +{ + struct bfqio_cgroup *bgrp = css_to_bfqio(css); + struct hlist_node *tmp; + struct bfq_group *bfqg; + + /* + * Since we are destroying the cgroup, there are no more tasks + * referencing it, and all the RCU grace periods that may have + * referenced it are ended (as the destruction of the parent + * cgroup is RCU-safe); bgrp->group_data will not be accessed by + * anything else and we don't need any synchronization. + */ + hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node) + bfq_destroy_group(bgrp, bfqg); + + BUG_ON(!hlist_empty(&bgrp->group_data)); + + kfree(bgrp); +} + +static int bfqio_css_online(struct cgroup_subsys_state *css) +{ + struct bfqio_cgroup *bgrp = css_to_bfqio(css); + + mutex_lock(&bfqio_mutex); + bgrp->online = true; + mutex_unlock(&bfqio_mutex); + + return 0; +} + +static void bfqio_css_offline(struct cgroup_subsys_state *css) +{ + struct bfqio_cgroup *bgrp = css_to_bfqio(css); + + mutex_lock(&bfqio_mutex); + bgrp->online = false; + mutex_unlock(&bfqio_mutex); +} + +struct cgroup_subsys bfqio_cgrp_subsys = { + .css_alloc = bfqio_create, + .css_online = bfqio_css_online, + .css_offline = bfqio_css_offline, + .can_attach = bfqio_can_attach, + .attach = bfqio_attach, + .css_free = bfqio_destroy, + .legacy_cftypes = bfqio_files, +}; +#else +static inline void bfq_init_entity(struct bfq_entity *entity, + struct bfq_group *bfqg) +{ + entity->weight = entity->new_weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio; + entity->ioprio_class = entity->new_ioprio_class; + entity->sched_data = &bfqg->sched_data; +} + +static inline struct bfq_group * +bfq_bic_update_cgroup(struct bfq_io_cq *bic) +{ + struct bfq_data *bfqd = bic_to_bfqd(bic); + return bfqd->root_group; +} + +static inline void bfq_bfqq_move(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct bfq_entity *entity, + struct bfq_group *bfqg) +{ +} + +static void bfq_end_wr_async(struct bfq_data *bfqd) +{ + bfq_end_wr_async_queues(bfqd, bfqd->root_group); +} + +static inline void bfq_disconnect_groups(struct bfq_data *bfqd) +{ + bfq_put_async_queues(bfqd, bfqd->root_group); +} + +static inline void bfq_free_root_group(struct bfq_data *bfqd) +{ + kfree(bfqd->root_group); +} + +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + int i; + + bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); + if (bfqg == NULL) + return NULL; + + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; + + return bfqg; +} +#endif diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c new file mode 100644 index 000000000..7f6b0004c --- /dev/null +++ b/block/bfq-ioc.c @@ -0,0 +1,36 @@ +/* + * BFQ: I/O context handling. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2010 Paolo Valente + */ + +/** + * icq_to_bic - convert iocontext queue structure to bfq_io_cq. + * @icq: the iocontext queue. 
+ */ +static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq) +{ + /* bic->icq is the first member, %NULL will convert to %NULL */ + return container_of(icq, struct bfq_io_cq, icq); +} + +/** + * bfq_bic_lookup - search into @ioc a bic associated to @bfqd. + * @bfqd: the lookup key. + * @ioc: the io_context of the process doing I/O. + * + * Queue lock must be held. + */ +static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, + struct io_context *ioc) +{ + if (ioc) + return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue)); + return NULL; +} diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c new file mode 100644 index 000000000..71b51c1b4 --- /dev/null +++ b/block/bfq-iosched.c @@ -0,0 +1,4218 @@ +/* + * Budget Fair Queueing (BFQ) disk scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2010 Paolo Valente + * + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ + * file. + * + * BFQ is a proportional-share storage-I/O scheduling algorithm based on + * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets, + * measured in number of sectors, to processes instead of time slices. The + * device is not granted to the in-service process for a given time slice, + * but until it has exhausted its assigned budget. This change from the time + * to the service domain allows BFQ to distribute the device throughput + * among processes as desired, without any distortion due to ZBR, workload + * fluctuations or other factors. BFQ uses an ad hoc internal scheduler, + * called B-WF2Q+, to schedule processes according to their budgets. More + * precisely, BFQ schedules queues associated to processes. Thanks to the + * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to + * I/O-bound processes issuing sequential requests (to boost the + * throughput), and yet guarantee a low latency to interactive and soft + * real-time applications. + * + * BFQ is described in [1], where also a reference to the initial, more + * theoretical paper on BFQ can be found. The interested reader can find + * in the latter paper full details on the main algorithm, as well as + * formulas of the guarantees and formal proofs of all the properties. + * With respect to the version of BFQ presented in these papers, this + * implementation adds a few more heuristics, such as the one that + * guarantees a low latency to soft real-time applications, and a + * hierarchical extension based on H-WF2Q+. + * + * B-WF2Q+ is based on WF2Q+, that is described in [2], together with + * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) + * complexity derives from the one introduced with EEVDF in [3]. + * + * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness + * with the BFQ Disk I/O Scheduler'', + * Proceedings of the 5th Annual International Systems and Storage + * Conference (SYSTOR '12), June 2012. + * + * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf + * + * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing + * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, + * Oct 1997. + * + * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz + * + * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline + * First: A Flexible and Accurate Mechanism for Proportional Share + * Resource Allocation,'' technical report. 
+ * + * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "bfq.h" +#include "blk.h" + +/* Expiration time of sync (0) and async (1) requests, in jiffies. */ +static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; + +/* Maximum backwards seek, in KiB. */ +static const int bfq_back_max = 16 * 1024; + +/* Penalty of a backwards seek, in number of sectors. */ +static const int bfq_back_penalty = 2; + +/* Idling period duration, in jiffies. */ +static int bfq_slice_idle = HZ / 125; + +/* Default maximum budget values, in sectors and number of requests. */ +static const int bfq_default_max_budget = 16 * 1024; +static const int bfq_max_budget_async_rq = 4; + +/* + * Async to sync throughput distribution is controlled as follows: + * when an async request is served, the entity is charged the number + * of sectors of the request, multiplied by the factor below + */ +static const int bfq_async_charge_factor = 10; + +/* Default timeout values, in jiffies, approximating CFQ defaults. */ +static const int bfq_timeout_sync = HZ / 8; +static int bfq_timeout_async = HZ / 25; + +struct kmem_cache *bfq_pool; + +/* Below this threshold (in ms), we consider thinktime immediate. */ +#define BFQ_MIN_TT 2 + +/* hw_tag detection: parallel requests threshold and min samples needed. */ +#define BFQ_HW_QUEUE_THRESHOLD 4 +#define BFQ_HW_QUEUE_SAMPLES 32 + +#define BFQQ_SEEK_THR (sector_t)(8 * 1024) +#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR) + +/* Min samples used for peak rate estimation (for autotuning). */ +#define BFQ_PEAK_RATE_SAMPLES 32 + +/* Shift used for peak rate fixed precision calculations. */ +#define BFQ_RATE_SHIFT 16 + +/* + * By default, BFQ computes the duration of the weight raising for + * interactive applications automatically, using the following formula: + * duration = (R / r) * T, where r is the peak rate of the device, and + * R and T are two reference parameters. + * In particular, R is the peak rate of the reference device (see below), + * and T is a reference time: given the systems that are likely to be + * installed on the reference device according to its speed class, T is + * about the maximum time needed, under BFQ and while reading two files in + * parallel, to load typical large applications on these systems. + * In practice, the slower/faster the device at hand is, the more/less it + * takes to load applications with respect to the reference device. + * Accordingly, the longer/shorter BFQ grants weight raising to interactive + * applications. + * + * BFQ uses four different reference pairs (R, T), depending on: + * . whether the device is rotational or non-rotational; + * . whether the device is slow, such as old or portable HDDs, as well as + * SD cards, or fast, such as newer HDDs and SSDs. + * + * The device's speed class is dynamically (re)detected in + * bfq_update_peak_rate() every time the estimated peak rate is updated. + * + * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0] + * are the reference values for a slow/fast rotational device, whereas + * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for + * a slow/fast non-rotational device. Finally, device_speed_thresh are the + * thresholds used to switch between speed classes. + * Both the reference peak rates and the thresholds are measured in + * sectors/usec, left-shifted by BFQ_RATE_SHIFT. 
+ */ +static int R_slow[2] = {1536, 10752}; +static int R_fast[2] = {17415, 34791}; +/* + * To improve readability, a conversion function is used to initialize the + * following arrays, which entails that they can be initialized only in a + * function. + */ +static int T_slow[2]; +static int T_fast[2]; +static int device_speed_thresh[2]; + +#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ + { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) + +#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) +#define RQ_BFQQ(rq) ((rq)->elv.priv[1]) + +static inline void bfq_schedule_dispatch(struct bfq_data *bfqd); + +#include "bfq-ioc.c" +#include "bfq-sched.c" +#include "bfq-cgroup.c" + +#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\ + IOPRIO_CLASS_IDLE) +#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\ + IOPRIO_CLASS_RT) + +#define bfq_sample_valid(samples) ((samples) > 80) + +/* + * The following macro groups conditions that need to be evaluated when + * checking if existing queues and groups form a symmetric scenario + * and therefore idling can be reduced or disabled for some of the + * queues. See the comment to the function bfq_bfqq_must_not_expire() + * for further details. + */ +#ifdef CONFIG_CGROUP_BFQIO +#define symmetric_scenario (!bfqd->active_numerous_groups && \ + !bfq_differentiated_weights(bfqd)) +#else +#define symmetric_scenario (!bfq_differentiated_weights(bfqd)) +#endif + +/* + * We regard a request as SYNC, if either it's a read or has the SYNC bit + * set (in which case it could also be a direct WRITE). + */ +static inline int bfq_bio_sync(struct bio *bio) +{ + if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC)) + return 1; + + return 0; +} + +/* + * Scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing. + */ +static inline void bfq_schedule_dispatch(struct bfq_data *bfqd) +{ + if (bfqd->queued != 0) { + bfq_log(bfqd, "schedule dispatch"); + kblockd_schedule_work(&bfqd->unplug_work); + } +} + +/* + * Lifted from AS - choose which of rq1 and rq2 is best served now. + * We choose the request that is closest to the head right now. Distance + * behind the head is penalized and only allowed to a certain extent. + */ +static struct request *bfq_choose_req(struct bfq_data *bfqd, + struct request *rq1, + struct request *rq2, + sector_t last) +{ + sector_t s1, s2, d1 = 0, d2 = 0; + unsigned long back_max; +#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ + unsigned wrap = 0; /* bit mask: requests behind the disk head? */ + + if (rq1 == NULL || rq1 == rq2) + return rq2; + if (rq2 == NULL) + return rq1; + + if (rq_is_sync(rq1) && !rq_is_sync(rq2)) + return rq1; + else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) + return rq2; + if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) + return rq1; + else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) + return rq2; + + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); + + /* + * By definition, 1KiB is 2 sectors. + */ + back_max = bfqd->bfq_back_max * 2; + + /* + * Strict one way elevator _except_ in the case where we allow + * short backward seeks which are biased as twice the cost of a + * similar forward seek.
+ */ + if (s1 >= last) + d1 = s1 - last; + else if (s1 + back_max >= last) + d1 = (last - s1) * bfqd->bfq_back_penalty; + else + wrap |= BFQ_RQ1_WRAP; + + if (s2 >= last) + d2 = s2 - last; + else if (s2 + back_max >= last) + d2 = (last - s2) * bfqd->bfq_back_penalty; + else + wrap |= BFQ_RQ2_WRAP; + + /* Found required data */ + + /* + * By doing switch() on the bit mask "wrap" we avoid having to + * check two variables for all permutations: --> faster! + */ + switch (wrap) { + case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ + if (d1 < d2) + return rq1; + else if (d2 < d1) + return rq2; + else { + if (s1 >= s2) + return rq1; + else + return rq2; + } + + case BFQ_RQ2_WRAP: + return rq1; + case BFQ_RQ1_WRAP: + return rq2; + case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ + default: + /* + * Since both rqs are wrapped, + * start with the one that's further behind head + * (--> only *one* back seek required), + * since back seek takes more time than forward. + */ + if (s1 <= s2) + return rq1; + else + return rq2; + } +} + +static struct bfq_queue * +bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, + sector_t sector, struct rb_node **ret_parent, + struct rb_node ***rb_link) +{ + struct rb_node **p, *parent; + struct bfq_queue *bfqq = NULL; + + parent = NULL; + p = &root->rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + bfqq = rb_entry(parent, struct bfq_queue, pos_node); + + /* + * Sort strictly based on sector. Smallest to the left, + * largest to the right. + */ + if (sector > blk_rq_pos(bfqq->next_rq)) + n = &(*p)->rb_right; + else if (sector < blk_rq_pos(bfqq->next_rq)) + n = &(*p)->rb_left; + else + break; + p = n; + bfqq = NULL; + } + + *ret_parent = parent; + if (rb_link) + *rb_link = p; + + bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", + (long long unsigned)sector, + bfqq != NULL ? bfqq->pid : 0); + + return bfqq; +} + +static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct rb_node **p, *parent; + struct bfq_queue *__bfqq; + + if (bfqq->pos_root != NULL) { + rb_erase(&bfqq->pos_node, bfqq->pos_root); + bfqq->pos_root = NULL; + } + + if (bfq_class_idle(bfqq)) + return; + if (!bfqq->next_rq) + return; + + bfqq->pos_root = &bfqd->rq_pos_tree; + __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, + blk_rq_pos(bfqq->next_rq), &parent, &p); + if (__bfqq == NULL) { + rb_link_node(&bfqq->pos_node, parent, p); + rb_insert_color(&bfqq->pos_node, bfqq->pos_root); + } else + bfqq->pos_root = NULL; +} + +/* + * Tell whether there are active queues or groups with differentiated weights. + */ +static inline bool bfq_differentiated_weights(struct bfq_data *bfqd) +{ + /* + * For weights to differ, at least one of the trees must contain + * at least two nodes. + */ + return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && + (bfqd->queue_weights_tree.rb_node->rb_left || + bfqd->queue_weights_tree.rb_node->rb_right) +#ifdef CONFIG_CGROUP_BFQIO + ) || + (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && + (bfqd->group_weights_tree.rb_node->rb_left || + bfqd->group_weights_tree.rb_node->rb_right) +#endif + ); +} + +/* + * If the weight-counter tree passed as input contains no counter for + * the weight of the input entity, then add that counter; otherwise just + * increment the existing counter. + * + * Note that weight-counter trees contain few nodes in mostly symmetric + * scenarios. For example, if all queues have the same weight, then the + * weight-counter tree for the queues may contain at most one node. 
+ * This holds even if low_latency is on, because weight-raised queues + * are not inserted in the tree. + * In most scenarios, the rate at which nodes are created/destroyed + * should be low too. + */ +static void bfq_weights_tree_add(struct bfq_data *bfqd, + struct bfq_entity *entity, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* + * Do not insert if the entity is already associated with a + * counter, which happens if: + * 1) the entity is associated with a queue, + * 2) a request arrival has caused the queue to become both + * non-weight-raised, and hence change its weight, and + * backlogged; in this respect, each of the two events + * causes an invocation of this function, + * 3) this is the invocation of this function caused by the + * second event. This second invocation is actually useless, + * and we handle this fact by exiting immediately. More + * efficient or clearer solutions might possibly be adopted. + */ + if (entity->weight_counter) + return; + + while (*new) { + struct bfq_weight_counter *__counter = container_of(*new, + struct bfq_weight_counter, + weights_node); + parent = *new; + + if (entity->weight == __counter->weight) { + entity->weight_counter = __counter; + goto inc_counter; + } + if (entity->weight < __counter->weight) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), + GFP_ATOMIC); + entity->weight_counter->weight = entity->weight; + rb_link_node(&entity->weight_counter->weights_node, parent, new); + rb_insert_color(&entity->weight_counter->weights_node, root); + +inc_counter: + entity->weight_counter->num_active++; +} + +/* + * Decrement the weight counter associated with the entity, and, if the + * counter reaches 0, remove the counter from the tree. + * See the comments to the function bfq_weights_tree_add() for considerations + * about overhead. + */ +static void bfq_weights_tree_remove(struct bfq_data *bfqd, + struct bfq_entity *entity, + struct rb_root *root) +{ + if (!entity->weight_counter) + return; + + BUG_ON(RB_EMPTY_ROOT(root)); + BUG_ON(entity->weight_counter->weight != entity->weight); + + BUG_ON(!entity->weight_counter->num_active); + entity->weight_counter->num_active--; + if (entity->weight_counter->num_active > 0) + goto reset_entity_pointer; + + rb_erase(&entity->weight_counter->weights_node, root); + kfree(entity->weight_counter); + +reset_entity_pointer: + entity->weight_counter = NULL; +} + +static struct request *bfq_find_next_rq(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *last) +{ + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); + struct request *next = NULL, *prev = NULL; + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + + if (rbprev != NULL) + prev = rb_entry_rq(rbprev); + + if (rbnext != NULL) + next = rb_entry_rq(rbnext); + else { + rbnext = rb_first(&bfqq->sort_list); + if (rbnext && rbnext != &last->rb_node) + next = rb_entry_rq(rbnext); + } + + return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); +} + +/* see the definition of bfq_async_charge_factor for details */ +static inline unsigned long bfq_serv_to_charge(struct request *rq, + struct bfq_queue *bfqq) +{ + return blk_rq_sectors(rq) * + (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) * + bfq_async_charge_factor)); +} + +/** + * bfq_updated_next_req - update the queue after a new next_rq selection. + * @bfqd: the device data the queue belongs to. 
+ * @bfqq: the queue to update. + * + * If the first request of a queue changes we make sure that the queue + * has enough budget to serve at least its first request (if the + * request has grown). We do this because if the queue has not enough + * budget for its first request, it has to go through two dispatch + * rounds to actually get it dispatched. + */ +static void bfq_updated_next_req(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + struct request *next_rq = bfqq->next_rq; + unsigned long new_budget; + + if (next_rq == NULL) + return; + + if (bfqq == bfqd->in_service_queue) + /* + * In order not to break guarantees, budgets cannot be + * changed after an entity has been selected. + */ + return; + + BUG_ON(entity->tree != &st->active); + BUG_ON(entity == entity->sched_data->in_service_entity); + + new_budget = max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + if (entity->budget != new_budget) { + entity->budget = new_budget; + bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", + new_budget); + bfq_activate_bfqq(bfqd, bfqq); + } +} + +static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd) +{ + u64 dur; + + if (bfqd->bfq_wr_max_time > 0) + return bfqd->bfq_wr_max_time; + + dur = bfqd->RT_prod; + do_div(dur, bfqd->peak_rate); + + return dur; +} + +static inline unsigned +bfq_bfqq_cooperations(struct bfq_queue *bfqq) +{ + return bfqq->bic ? bfqq->bic->cooperations : 0; +} + +static inline void +bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +{ + if (bic->saved_idle_window) + bfq_mark_bfqq_idle_window(bfqq); + else + bfq_clear_bfqq_idle_window(bfqq); + if (bic->saved_IO_bound) + bfq_mark_bfqq_IO_bound(bfqq); + else + bfq_clear_bfqq_IO_bound(bfqq); + /* Assuming that the flag in_large_burst is already correctly set */ + if (bic->wr_time_left && bfqq->bfqd->low_latency && + !bfq_bfqq_in_large_burst(bfqq) && + bic->cooperations < bfqq->bfqd->bfq_coop_thresh) { + /* + * Start a weight raising period with the duration given by + * the raising_time_left snapshot. + */ + if (bfq_bfqq_busy(bfqq)) + bfqq->bfqd->wr_busy_queues++; + bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff; + bfqq->wr_cur_max_time = bic->wr_time_left; + bfqq->last_wr_start_finish = jiffies; + bfqq->entity.ioprio_changed = 1; + } + /* + * Clear wr_time_left to prevent bfq_bfqq_save_state() from + * getting confused about the queue's need of a weight-raising + * period. + */ + bic->wr_time_left = 0; +} + +/* Must be called with the queue_lock held. 
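As a side note, bfq_wr_duration() above scales the weight-raising period inversely with the measured peak rate, so faster devices weight-raise for a shorter wall-clock time. A stand-alone sketch with made-up numbers (RT_prod and peak_rate are assumptions, in matching units):

#include <stdio.h>

int main(void)
{
	unsigned long long RT_prod   = 7500ULL * 1000;  /* assumed R (rate) * T (jiffies) */
	unsigned long long peak_rate = 3000;            /* assumed measured rate, same units as R */

	/* Same structure as the do_div() above: duration = RT_prod / peak_rate. */
	printf("weight-raising duration: %llu jiffies\n", RT_prod / peak_rate);
	return 0;
}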
*/ +static int bfqq_process_refs(struct bfq_queue *bfqq) +{ + int process_refs, io_refs; + + io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; + process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st; + BUG_ON(process_refs < 0); + return process_refs; +} + +/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ +static inline void bfq_reset_burst_list(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + struct bfq_queue *item; + struct hlist_node *n; + + hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) + hlist_del_init(&item->burst_list_node); + hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); + bfqd->burst_size = 1; +} + +/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ +static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + /* Increment burst size to take into account also bfqq */ + bfqd->burst_size++; + + if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { + struct bfq_queue *pos, *bfqq_item; + struct hlist_node *n; + + /* + * Enough queues have been activated shortly after each + * other to consider this burst as large. + */ + bfqd->large_burst = true; + + /* + * We can now mark all queues in the burst list as + * belonging to a large burst. + */ + hlist_for_each_entry(bfqq_item, &bfqd->burst_list, + burst_list_node) + bfq_mark_bfqq_in_large_burst(bfqq_item); + bfq_mark_bfqq_in_large_burst(bfqq); + + /* + * From now on, and until the current burst finishes, any + * new queue being activated shortly after the last queue + * was inserted in the burst can be immediately marked as + * belonging to a large burst. So the burst list is not + * needed any more. Remove it. + */ + hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, + burst_list_node) + hlist_del_init(&pos->burst_list_node); + } else /* burst not yet large: add bfqq to the burst list */ + hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); +} + +/* + * If many queues happen to become active shortly after each other, then, + * to help the processes associated to these queues get their job done as + * soon as possible, it is usually better to not grant either weight-raising + * or device idling to these queues. In this comment we describe, firstly, + * the reasons why this fact holds, and, secondly, the next function, which + * implements the main steps needed to properly mark these queues so that + * they can then be treated in a different way. + * + * As for the terminology, we say that a queue becomes active, i.e., + * switches from idle to backlogged, either when it is created (as a + * consequence of the arrival of an I/O request), or, if already existing, + * when a new request for the queue arrives while the queue is idle. + * Bursts of activations, i.e., activations of different queues occurring + * shortly after each other, are typically caused by services or applications + * that spawn or reactivate many parallel threads/processes. Examples are + * systemd during boot or git grep. + * + * These services or applications benefit mostly from a high throughput: + * the quicker the requests of the activated queues are cumulatively served, + * the sooner the target job of these queues gets completed. As a consequence, + * weight-raising any of these queues, which also implies idling the device + * for it, is almost always counterproductive: in most cases it just lowers + * throughput. 
+ * + * On the other hand, a burst of activations may be also caused by the start + * of an application that does not consist in a lot of parallel I/O-bound + * threads. In fact, with a complex application, the burst may be just a + * consequence of the fact that several processes need to be executed to + * start-up the application. To start an application as quickly as possible, + * the best thing to do is to privilege the I/O related to the application + * with respect to all other I/O. Therefore, the best strategy to start as + * quickly as possible an application that causes a burst of activations is + * to weight-raise all the queues activated during the burst. This is the + * exact opposite of the best strategy for the other type of bursts. + * + * In the end, to take the best action for each of the two cases, the two + * types of bursts need to be distinguished. Fortunately, this seems + * relatively easy to do, by looking at the sizes of the bursts. In + * particular, we found a threshold such that bursts with a larger size + * than that threshold are apparently caused only by services or commands + * such as systemd or git grep. For brevity, hereafter we call just 'large' + * these bursts. BFQ *does not* weight-raise queues whose activations occur + * in a large burst. In addition, for each of these queues BFQ performs or + * does not perform idling depending on which choice boosts the throughput + * most. The exact choice depends on the device and request pattern at + * hand. + * + * Turning back to the next function, it implements all the steps needed + * to detect the occurrence of a large burst and to properly mark all the + * queues belonging to it (so that they can then be treated in a different + * way). This goal is achieved by maintaining a special "burst list" that + * holds, temporarily, the queues that belong to the burst in progress. The + * list is then used to mark these queues as belonging to a large burst if + * the burst does become large. The main steps are the following. + * + * . when the very first queue is activated, the queue is inserted into the + * list (as it could be the first queue in a possible burst) + * + * . if the current burst has not yet become large, and a queue Q that does + * not yet belong to the burst is activated shortly after the last time + * at which a new queue entered the burst list, then the function appends + * Q to the burst list + * + * . if, as a consequence of the previous step, the burst size reaches + * the large-burst threshold, then + * + * . all the queues in the burst list are marked as belonging to a + * large burst + * + * . the burst list is deleted; in fact, the burst list already served + * its purpose (keeping temporarily track of the queues in a burst, + * so as to be able to mark them as belonging to a large burst in the + * previous sub-step), and now is not needed any more + * + * . the device enters a large-burst mode + * + * . if a queue Q that does not belong to the burst is activated while + * the device is in large-burst mode and shortly after the last time + * at which a queue either entered the burst list or was marked as + * belonging to the current large burst, then Q is immediately marked + * as belonging to a large burst. + * + * . 
if a queue Q that does not belong to the burst is activated a while + * later, i.e., not shortly after, than the last time at which a queue + * either entered the burst list or was marked as belonging to the + * current large burst, then the current burst is deemed as finished and: + * + * . the large-burst mode is reset if set + * + * . the burst list is emptied + * + * . Q is inserted in the burst list, as Q may be the first queue + * in a possible new burst (then the burst list contains just Q + * after this step). + */ +static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, + bool idle_for_long_time) +{ + /* + * If bfqq happened to be activated in a burst, but has been idle + * for at least as long as an interactive queue, then we assume + * that, in the overall I/O initiated in the burst, the I/O + * associated to bfqq is finished. So bfqq does not need to be + * treated as a queue belonging to a burst anymore. Accordingly, + * we reset bfqq's in_large_burst flag if set, and remove bfqq + * from the burst list if it's there. We do not decrement instead + * burst_size, because the fact that bfqq does not need to belong + * to the burst list any more does not invalidate the fact that + * bfqq may have been activated during the current burst. + */ + if (idle_for_long_time) { + hlist_del_init(&bfqq->burst_list_node); + bfq_clear_bfqq_in_large_burst(bfqq); + } + + /* + * If bfqq is already in the burst list or is part of a large + * burst, then there is nothing else to do. + */ + if (!hlist_unhashed(&bfqq->burst_list_node) || + bfq_bfqq_in_large_burst(bfqq)) + return; + + /* + * If bfqq's activation happens late enough, then the current + * burst is finished, and related data structures must be reset. + * + * In this respect, consider the special case where bfqq is the very + * first queue being activated. In this case, last_ins_in_burst is + * not yet significant when we get here. But it is easy to verify + * that, whether or not the following condition is true, bfqq will + * end up being inserted into the burst list. In particular the + * list will happen to contain only bfqq. And this is exactly what + * has to happen, as bfqq may be the first queue in a possible + * burst. + */ + if (time_is_before_jiffies(bfqd->last_ins_in_burst + + bfqd->bfq_burst_interval)) { + bfqd->large_burst = false; + bfq_reset_burst_list(bfqd, bfqq); + return; + } + + /* + * If we get here, then bfqq is being activated shortly after the + * last queue. So, if the current burst is also large, we can mark + * bfqq as belonging to this large burst immediately. + */ + if (bfqd->large_burst) { + bfq_mark_bfqq_in_large_burst(bfqq); + return; + } + + /* + * If we get here, then a large-burst state has not yet been + * reached, but bfqq is being activated shortly after the last + * queue. Then we add bfqq to the burst. + */ + bfq_add_to_burst(bfqd, bfqq); +} + +static void bfq_add_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_entity *entity = &bfqq->entity; + struct bfq_data *bfqd = bfqq->bfqd; + struct request *next_rq, *prev; + unsigned long old_wr_coeff = bfqq->wr_coeff; + bool interactive = false; + + bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); + bfqq->queued[rq_is_sync(rq)]++; + bfqd->queued++; + + elv_rb_add(&bfqq->sort_list, rq); + + /* + * Check if this request is a better next-serve candidate. 
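The burst-detection steps listed above can be summarized with a small user-space model (not part of the patch, and simplified: the per-queue burst list and the idle-for-long-time reset are omitted; the threshold and interval values are assumptions):

#include <stdbool.h>
#include <stdio.h>

#define BURST_INTERVAL      8    /* assumed: "shortly after", in ticks */
#define LARGE_BURST_THRESH  4    /* assumed large-burst threshold */

struct burst_state {
	unsigned long last_ins;   /* time of the last insertion in the burst */
	int size;                 /* current burst size */
	bool large;               /* large-burst mode */
};

/* Returns true if the activation at time 'now' belongs to a large burst. */
static bool handle_activation(struct burst_state *st, unsigned long now)
{
	if (now - st->last_ins > BURST_INTERVAL) {
		/* Too late: the previous burst is over, start a new one. */
		st->large = false;
		st->size = 1;
		st->last_ins = now;
		return false;
	}
	st->last_ins = now;
	if (st->large)
		return true;              /* already in large-burst mode */
	if (++st->size >= LARGE_BURST_THRESH) {
		st->large = true;         /* burst just became large */
		return true;
	}
	return false;
}

int main(void)
{
	struct burst_state st = { 0, 0, false };
	unsigned long times[] = { 1, 3, 5, 7, 9, 40 };

	for (unsigned int i = 0; i < sizeof(times) / sizeof(times[0]); i++)
		printf("t=%lu large=%d\n", times[i],
		       handle_activation(&st, times[i]));
	return 0;
}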
+ */ + prev = bfqq->next_rq; + next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); + BUG_ON(next_rq == NULL); + bfqq->next_rq = next_rq; + + /* + * Adjust priority tree position, if next_rq changes. + */ + if (prev != bfqq->next_rq) + bfq_rq_pos_tree_add(bfqd, bfqq); + + if (!bfq_bfqq_busy(bfqq)) { + bool soft_rt, coop_or_in_burst, + idle_for_long_time = time_is_before_jiffies( + bfqq->budget_timeout + + bfqd->bfq_wr_min_idle_time); + + if (bfq_bfqq_sync(bfqq)) { + bool already_in_burst = + !hlist_unhashed(&bfqq->burst_list_node) || + bfq_bfqq_in_large_burst(bfqq); + bfq_handle_burst(bfqd, bfqq, idle_for_long_time); + /* + * If bfqq was not already in the current burst, + * then, at this point, bfqq either has been + * added to the current burst or has caused the + * current burst to terminate. In particular, in + * the second case, bfqq has become the first + * queue in a possible new burst. + * In both cases last_ins_in_burst needs to be + * moved forward. + */ + if (!already_in_burst) + bfqd->last_ins_in_burst = jiffies; + } + + coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) || + bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh; + soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && + !coop_or_in_burst && + time_is_before_jiffies(bfqq->soft_rt_next_start); + interactive = !coop_or_in_burst && idle_for_long_time; + entity->budget = max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + + if (!bfq_bfqq_IO_bound(bfqq)) { + if (time_before(jiffies, + RQ_BIC(rq)->ttime.last_end_request + + bfqd->bfq_slice_idle)) { + bfqq->requests_within_timer++; + if (bfqq->requests_within_timer >= + bfqd->bfq_requests_within_timer) + bfq_mark_bfqq_IO_bound(bfqq); + } else + bfqq->requests_within_timer = 0; + } + + if (!bfqd->low_latency) + goto add_bfqq_busy; + + if (bfq_bfqq_just_split(bfqq)) + goto set_ioprio_changed; + + /* + * If the queue: + * - is not being boosted, + * - has been idle for enough time, + * - is not a sync queue or is linked to a bfq_io_cq (it is + * shared "for its nature" or it is not shared and its + * requests have not been redirected to a shared queue) + * start a weight-raising period. + */ + if (old_wr_coeff == 1 && (interactive || soft_rt) && + (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) { + bfqq->wr_coeff = bfqd->bfq_wr_coeff; + if (interactive) + bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); + else + bfqq->wr_cur_max_time = + bfqd->bfq_wr_rt_max_time; + bfq_log_bfqq(bfqd, bfqq, + "wrais starting at %lu, rais_max_time %u", + jiffies, + jiffies_to_msecs(bfqq->wr_cur_max_time)); + } else if (old_wr_coeff > 1) { + if (interactive) + bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); + else if (coop_or_in_burst || + (bfqq->wr_cur_max_time == + bfqd->bfq_wr_rt_max_time && + !soft_rt)) { + bfqq->wr_coeff = 1; + bfq_log_bfqq(bfqd, bfqq, + "wrais ending at %lu, rais_max_time %u", + jiffies, + jiffies_to_msecs(bfqq-> + wr_cur_max_time)); + } else if (time_before( + bfqq->last_wr_start_finish + + bfqq->wr_cur_max_time, + jiffies + + bfqd->bfq_wr_rt_max_time) && + soft_rt) { + /* + * + * The remaining weight-raising time is lower + * than bfqd->bfq_wr_rt_max_time, which means + * that the application is enjoying weight + * raising either because deemed soft-rt in + * the near past, or because deemed interactive + * a long ago. 
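For reference, the weight-raising start decision taken above, for a queue becoming busy while low_latency is enabled, can be sketched as follows (stand-alone, not part of the patch; the coefficient and durations are assumed values):

#include <stdbool.h>
#include <stdio.h>

struct wr_decision {
	unsigned int coeff;        /* weight-raising coefficient to apply */
	unsigned long max_time;    /* duration of the raising period, jiffies */
};

static struct wr_decision start_wr(bool interactive, bool soft_rt,
				   unsigned int old_wr_coeff,
				   unsigned long wr_duration,
				   unsigned long rt_max_time)
{
	struct wr_decision d = { 1, 0 };   /* default: no raising */

	/* Raise only queues that are not already raised and that are
	 * deemed either interactive or soft real-time. */
	if (old_wr_coeff == 1 && (interactive || soft_rt)) {
		d.coeff = 30;              /* assumed bfq_wr_coeff */
		d.max_time = interactive ? wr_duration : rt_max_time;
	}
	return d;
}

int main(void)
{
	struct wr_decision d = start_wr(true, false, 1, 2500, 75);

	printf("coeff=%u for %lu jiffies\n", d.coeff, d.max_time);
	return 0;
}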
+ * In both cases, resetting now the current + * remaining weight-raising time for the + * application to the weight-raising duration + * for soft rt applications would not cause any + * latency increase for the application (as the + * new duration would be higher than the + * remaining time). + * + * In addition, the application is now meeting + * the requirements for being deemed soft rt. + * In the end we can correctly and safely + * (re)charge the weight-raising duration for + * the application with the weight-raising + * duration for soft rt applications. + * + * In particular, doing this recharge now, i.e., + * before the weight-raising period for the + * application finishes, reduces the probability + * of the following negative scenario: + * 1) the weight of a soft rt application is + * raised at startup (as for any newly + * created application), + * 2) since the application is not interactive, + * at a certain time weight-raising is + * stopped for the application, + * 3) at that time the application happens to + * still have pending requests, and hence + * is destined to not have a chance to be + * deemed soft rt before these requests are + * completed (see the comments to the + * function bfq_bfqq_softrt_next_start() + * for details on soft rt detection), + * 4) these pending requests experience a high + * latency because the application is not + * weight-raised while they are pending. + */ + bfqq->last_wr_start_finish = jiffies; + bfqq->wr_cur_max_time = + bfqd->bfq_wr_rt_max_time; + } + } +set_ioprio_changed: + if (old_wr_coeff != bfqq->wr_coeff) + entity->ioprio_changed = 1; +add_bfqq_busy: + bfqq->last_idle_bklogged = jiffies; + bfqq->service_from_backlogged = 0; + bfq_clear_bfqq_softrt_update(bfqq); + bfq_add_bfqq_busy(bfqd, bfqq); + } else { + if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && + time_is_before_jiffies( + bfqq->last_wr_start_finish + + bfqd->bfq_wr_min_inter_arr_async)) { + bfqq->wr_coeff = bfqd->bfq_wr_coeff; + bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); + + bfqd->wr_busy_queues++; + entity->ioprio_changed = 1; + bfq_log_bfqq(bfqd, bfqq, + "non-idle wrais starting at %lu, rais_max_time %u", + jiffies, + jiffies_to_msecs(bfqq->wr_cur_max_time)); + } + if (prev != bfqq->next_rq) + bfq_updated_next_req(bfqd, bfqq); + } + + if (bfqd->low_latency && + (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) + bfqq->last_wr_start_finish = jiffies; +} + +static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, + struct bio *bio) +{ + struct task_struct *tsk = current; + struct bfq_io_cq *bic; + struct bfq_queue *bfqq; + + bic = bfq_bic_lookup(bfqd, tsk->io_context); + if (bic == NULL) + return NULL; + + bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); + if (bfqq != NULL) + return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); + + return NULL; +} + +static void bfq_activate_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + bfqd->rq_in_driver++; + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); + bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", + (long long unsigned)bfqd->last_position); +} + +static inline void bfq_deactivate_request(struct request_queue *q, + struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + BUG_ON(bfqd->rq_in_driver == 0); + bfqd->rq_in_driver--; +} + +static void bfq_remove_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + const int sync 
= rq_is_sync(rq); + + if (bfqq->next_rq == rq) { + bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); + bfq_updated_next_req(bfqd, bfqq); + } + + if (rq->queuelist.prev != &rq->queuelist) + list_del_init(&rq->queuelist); + BUG_ON(bfqq->queued[sync] == 0); + bfqq->queued[sync]--; + bfqd->queued--; + elv_rb_del(&bfqq->sort_list, rq); + + if (RB_EMPTY_ROOT(&bfqq->sort_list)) { + if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) + bfq_del_bfqq_busy(bfqd, bfqq, 1); + /* + * Remove queue from request-position tree as it is empty. + */ + if (bfqq->pos_root != NULL) { + rb_erase(&bfqq->pos_node, bfqq->pos_root); + bfqq->pos_root = NULL; + } + } + + if (rq->cmd_flags & REQ_META) { + BUG_ON(bfqq->meta_pending == 0); + bfqq->meta_pending--; + } +} + +static int bfq_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct request *__rq; + + __rq = bfq_find_rq_fmerge(bfqd, bio); + if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void bfq_merged_request(struct request_queue *q, struct request *req, + int type) +{ + if (type == ELEVATOR_FRONT_MERGE && + rb_prev(&req->rb_node) && + blk_rq_pos(req) < + blk_rq_pos(container_of(rb_prev(&req->rb_node), + struct request, rb_node))) { + struct bfq_queue *bfqq = RQ_BFQQ(req); + struct bfq_data *bfqd = bfqq->bfqd; + struct request *prev, *next_rq; + + /* Reposition request in its sort_list */ + elv_rb_del(&bfqq->sort_list, req); + elv_rb_add(&bfqq->sort_list, req); + /* Choose next request to be served for bfqq */ + prev = bfqq->next_rq; + next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, + bfqd->last_position); + BUG_ON(next_rq == NULL); + bfqq->next_rq = next_rq; + /* + * If next_rq changes, update both the queue's budget to + * fit the new request and the queue's position in its + * rq_pos_tree. + */ + if (prev != bfqq->next_rq) { + bfq_updated_next_req(bfqd, bfqq); + bfq_rq_pos_tree_add(bfqd, bfqq); + } + } +} + +static void bfq_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); + + /* + * If next and rq belong to the same bfq_queue and next is older + * than rq, then reposition rq in the fifo (by substituting next + * with rq). Otherwise, if next and rq belong to different + * bfq_queues, never reposition rq: in fact, we would have to + * reposition it with respect to next's position in its own fifo, + * which would most certainly be too expensive with respect to + * the benefits. 
+ */ + if (bfqq == next_bfqq && + !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(next->fifo_time, rq->fifo_time)) { + list_del_init(&rq->queuelist); + list_replace_init(&next->queuelist, &rq->queuelist); + rq->fifo_time = next->fifo_time; + } + + if (bfqq->next_rq == next) + bfqq->next_rq = rq; + + bfq_remove_request(next); +} + +/* Must be called with bfqq != NULL */ +static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq) +{ + BUG_ON(bfqq == NULL); + if (bfq_bfqq_busy(bfqq)) + bfqq->bfqd->wr_busy_queues--; + bfqq->wr_coeff = 1; + bfqq->wr_cur_max_time = 0; + /* Trigger a weight change on the next activation of the queue */ + bfqq->entity.ioprio_changed = 1; +} + +static void bfq_end_wr_async_queues(struct bfq_data *bfqd, + struct bfq_group *bfqg) +{ + int i, j; + + for (i = 0; i < 2; i++) + for (j = 0; j < IOPRIO_BE_NR; j++) + if (bfqg->async_bfqq[i][j] != NULL) + bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); + if (bfqg->async_idle_bfqq != NULL) + bfq_bfqq_end_wr(bfqg->async_idle_bfqq); +} + +static void bfq_end_wr(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq; + + spin_lock_irq(bfqd->queue->queue_lock); + + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) + bfq_bfqq_end_wr(bfqq); + list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) + bfq_bfqq_end_wr(bfqq); + bfq_end_wr_async(bfqd); + + spin_unlock_irq(bfqd->queue->queue_lock); +} + +static inline sector_t bfq_io_struct_pos(void *io_struct, bool request) +{ + if (request) + return blk_rq_pos(io_struct); + else + return ((struct bio *)io_struct)->bi_iter.bi_sector; +} + +static inline sector_t bfq_dist_from(sector_t pos1, + sector_t pos2) +{ + if (pos1 >= pos2) + return pos1 - pos2; + else + return pos2 - pos1; +} + +static inline int bfq_rq_close_to_sector(void *io_struct, bool request, + sector_t sector) +{ + return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <= + BFQQ_SEEK_THR; +} + +static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector) +{ + struct rb_root *root = &bfqd->rq_pos_tree; + struct rb_node *parent, *node; + struct bfq_queue *__bfqq; + + if (RB_EMPTY_ROOT(root)) + return NULL; + + /* + * First, if we find a request starting at the end of the last + * request, choose it. + */ + __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); + if (__bfqq != NULL) + return __bfqq; + + /* + * If the exact sector wasn't found, the parent of the NULL leaf + * will contain the closest sector (rq_pos_tree sorted by + * next_request position). + */ + __bfqq = rb_entry(parent, struct bfq_queue, pos_node); + if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) + return __bfqq; + + if (blk_rq_pos(__bfqq->next_rq) < sector) + node = rb_next(&__bfqq->pos_node); + else + node = rb_prev(&__bfqq->pos_node); + if (node == NULL) + return NULL; + + __bfqq = rb_entry(node, struct bfq_queue, pos_node); + if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) + return __bfqq; + + return NULL; +} + +/* + * bfqd - obvious + * cur_bfqq - passed in so that we don't decide that the current queue + * is closely cooperating with itself + * sector - used as a reference point to search for a close queue + */ +static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd, + struct bfq_queue *cur_bfqq, + sector_t sector) +{ + struct bfq_queue *bfqq; + + if (bfq_class_idle(cur_bfqq)) + return NULL; + if (!bfq_bfqq_sync(cur_bfqq)) + return NULL; + if (BFQQ_SEEKY(cur_bfqq)) + return NULL; + + /* If device has only one backlogged bfq_queue, don't search. 
*/ + if (bfqd->busy_queues == 1) + return NULL; + + /* + * We should notice if some of the queues are cooperating, e.g. + * working closely on the same area of the disk. In that case, + * we can group them together and don't waste time idling. + */ + bfqq = bfqq_close(bfqd, sector); + if (bfqq == NULL || bfqq == cur_bfqq) + return NULL; + + /* + * Do not merge queues from different bfq_groups. + */ + if (bfqq->entity.parent != cur_bfqq->entity.parent) + return NULL; + + /* + * It only makes sense to merge sync queues. + */ + if (!bfq_bfqq_sync(bfqq)) + return NULL; + if (BFQQ_SEEKY(bfqq)) + return NULL; + + /* + * Do not merge queues of different priority classes. + */ + if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq)) + return NULL; + + return bfqq; +} + +static struct bfq_queue * +bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +{ + int process_refs, new_process_refs; + struct bfq_queue *__bfqq; + + /* + * If there are no process references on the new_bfqq, then it is + * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain + * may have dropped their last reference (not just their last process + * reference). + */ + if (!bfqq_process_refs(new_bfqq)) + return NULL; + + /* Avoid a circular list and skip interim queue merges. */ + while ((__bfqq = new_bfqq->new_bfqq)) { + if (__bfqq == bfqq) + return NULL; + new_bfqq = __bfqq; + } + + process_refs = bfqq_process_refs(bfqq); + new_process_refs = bfqq_process_refs(new_bfqq); + /* + * If the process for the bfqq has gone away, there is no + * sense in merging the queues. + */ + if (process_refs == 0 || new_process_refs == 0) + return NULL; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", + new_bfqq->pid); + + /* + * Merging is just a redirection: the requests of the process + * owning one of the two queues are redirected to the other queue. + * The latter queue, in its turn, is set as shared if this is the + * first time that the requests of some process are redirected to + * it. + * + * We redirect bfqq to new_bfqq and not the opposite, because we + * are in the context of the process owning bfqq, hence we have + * the io_cq of this process. So we can immediately configure this + * io_cq to redirect the requests of the process to new_bfqq. + * + * NOTE, even if new_bfqq coincides with the in-service queue, the + * io_cq of new_bfqq is not available, because, if the in-service + * queue is shared, bfqd->in_service_bic may not point to the + * io_cq of the in-service queue. + * Redirecting the requests of the process owning bfqq to the + * currently in-service queue is in any case the best option, as + * we feed the in-service queue with new requests close to the + * last request served and, by doing so, hopefully increase the + * throughput. + */ + bfqq->new_bfqq = new_bfqq; + atomic_add(process_refs, &new_bfqq->ref); + return new_bfqq; +} + +/* + * Attempt to schedule a merge of bfqq with the currently in-service queue + * or with a close queue among the scheduled queues. + * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue + * structure otherwise. + * + * The OOM queue is not allowed to participate to cooperation: in fact, since + * the requests temporarily redirected to the OOM queue could be redirected + * again to dedicated queues at any time, the state needed to correctly + * handle merging with the OOM queue would be quite complex and expensive + * to maintain. 
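Putting the eligibility checks of bfq_close_cooperator() and bfq_setup_merge() together, a merge candidate must roughly satisfy the predicate below (a reduced stand-alone sketch, not part of the patch; struct q only stands in for struct bfq_queue):

#include <stdbool.h>
#include <stdio.h>

struct q {
	bool sync, seeky, idle_class, rt_class;
	void *group;
	int process_refs;
};

static bool can_merge(const struct q *a, const struct q *b)
{
	if (!a->sync || !b->sync)              /* only sync queues merge */
		return false;
	if (a->seeky || b->seeky)              /* seeky queues do not cooperate */
		return false;
	if (a->idle_class || b->idle_class)    /* idle class never merges */
		return false;
	if (a->rt_class != b->rt_class)        /* same priority class only */
		return false;
	if (a->group != b->group)              /* same bfq_group only */
		return false;
	return a->process_refs > 0 && b->process_refs > 0;
}

int main(void)
{
	struct q a = { true, false, false, false, (void *)1, 1 };
	struct q b = { true, false, false, false, (void *)1, 2 };

	printf("mergeable: %d\n", can_merge(&a, &b));
	return 0;
}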
Besides, in such a critical condition as an out of memory, + * the benefits of queue merging may be little relevant, or even negligible. + */ +static struct bfq_queue * +bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, + void *io_struct, bool request) +{ + struct bfq_queue *in_service_bfqq, *new_bfqq; + + if (bfqq->new_bfqq) + return bfqq->new_bfqq; + + if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) + return NULL; + + in_service_bfqq = bfqd->in_service_queue; + + if (in_service_bfqq == NULL || in_service_bfqq == bfqq || + !bfqd->in_service_bic || + unlikely(in_service_bfqq == &bfqd->oom_bfqq)) + goto check_scheduled; + + if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq)) + goto check_scheduled; + + if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq)) + goto check_scheduled; + + if (in_service_bfqq->entity.parent != bfqq->entity.parent) + goto check_scheduled; + + if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && + bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) { + new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); + if (new_bfqq != NULL) + return new_bfqq; /* Merge with in-service queue */ + } + + /* + * Check whether there is a cooperator among currently scheduled + * queues. The only thing we need is that the bio/request is not + * NULL, as we need it to establish whether a cooperator exists. + */ +check_scheduled: + new_bfqq = bfq_close_cooperator(bfqd, bfqq, + bfq_io_struct_pos(io_struct, request)); + if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq)) + return bfq_setup_merge(bfqq, new_bfqq); + + return NULL; +} + +static inline void +bfq_bfqq_save_state(struct bfq_queue *bfqq) +{ + /* + * If bfqq->bic == NULL, the queue is already shared or its requests + * have already been redirected to a shared queue; both idle window + * and weight raising state have already been saved. Do nothing. + */ + if (bfqq->bic == NULL) + return; + if (bfqq->bic->wr_time_left) + /* + * This is the queue of a just-started process, and would + * deserve weight raising: we set wr_time_left to the full + * weight-raising duration to trigger weight-raising when + * and if the queue is split and the first request of the + * queue is enqueued. + */ + bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd); + else if (bfqq->wr_coeff > 1) { + unsigned long wr_duration = + jiffies - bfqq->last_wr_start_finish; + /* + * It may happen that a queue's weight raising period lasts + * longer than its wr_cur_max_time, as weight raising is + * handled only when a request is enqueued or dispatched (it + * does not use any timer). If the weight raising period is + * about to end, don't save it. + */ + if (bfqq->wr_cur_max_time <= wr_duration) + bfqq->bic->wr_time_left = 0; + else + bfqq->bic->wr_time_left = + bfqq->wr_cur_max_time - wr_duration; + /* + * The bfq_queue is becoming shared or the requests of the + * process owning the queue are being redirected to a shared + * queue. Stop the weight raising period of the queue, as in + * both cases it should not be owned by an interactive or + * soft real-time application. 
+ */ + bfq_bfqq_end_wr(bfqq); + } else + bfqq->bic->wr_time_left = 0; + bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); + bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); + bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); + bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); + bfqq->bic->cooperations++; + bfqq->bic->failed_cooperations = 0; +} + +static inline void +bfq_get_bic_reference(struct bfq_queue *bfqq) +{ + /* + * If bfqq->bic has a non-NULL value, the bic to which it belongs + * is about to begin using a shared bfq_queue. + */ + if (bfqq->bic) + atomic_long_inc(&bfqq->bic->icq.ioc->refcount); +} + +static void +bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, + struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +{ + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", + (long unsigned)new_bfqq->pid); + /* Save weight raising and idle window of the merged queues */ + bfq_bfqq_save_state(bfqq); + bfq_bfqq_save_state(new_bfqq); + if (bfq_bfqq_IO_bound(bfqq)) + bfq_mark_bfqq_IO_bound(new_bfqq); + bfq_clear_bfqq_IO_bound(bfqq); + /* + * Grab a reference to the bic, to prevent it from being destroyed + * before being possibly touched by a bfq_split_bfqq(). + */ + bfq_get_bic_reference(bfqq); + bfq_get_bic_reference(new_bfqq); + /* + * Merge queues (that is, let bic redirect its requests to new_bfqq) + */ + bic_set_bfqq(bic, new_bfqq, 1); + bfq_mark_bfqq_coop(new_bfqq); + /* + * new_bfqq now belongs to at least two bics (it is a shared queue): + * set new_bfqq->bic to NULL. bfqq either: + * - does not belong to any bic any more, and hence bfqq->bic must + * be set to NULL, or + * - is a queue whose owning bics have already been redirected to a + * different queue, hence the queue is destined to not belong to + * any bic soon and bfqq->bic is already NULL (therefore the next + * assignment causes no harm). + */ + new_bfqq->bic = NULL; + bfqq->bic = NULL; + bfq_put_queue(bfqq); +} + +static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq) +{ + struct bfq_io_cq *bic = bfqq->bic; + struct bfq_data *bfqd = bfqq->bfqd; + + if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) { + bic->failed_cooperations++; + if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations) + bic->cooperations = 0; + } +} + +static int bfq_allow_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_io_cq *bic; + struct bfq_queue *bfqq, *new_bfqq; + + /* + * Disallow merge of a sync bio into an async request. + */ + if (bfq_bio_sync(bio) && !rq_is_sync(rq)) + return 0; + + /* + * Lookup the bfqq that this bio will be queued with. Allow + * merge only if rq is queued there. + * Queue lock is held here. + */ + bic = bfq_bic_lookup(bfqd, current->io_context); + if (bic == NULL) + return 0; + + bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); + /* + * We take advantage of this function to perform an early merge + * of the queues of possible cooperating processes. + */ + if (bfqq != NULL) { + new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); + if (new_bfqq != NULL) { + bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); + /* + * If we get here, the bio will be queued in the + * shared queue, i.e., new_bfqq, so use new_bfqq + * to decide whether bio and rq can be merged. 
+ */ + bfqq = new_bfqq; + } else + bfq_bfqq_increase_failed_cooperations(bfqq); + } + + return bfqq == RQ_BFQQ(rq); +} + +static void __bfq_set_in_service_queue(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + if (bfqq != NULL) { + bfq_mark_bfqq_must_alloc(bfqq); + bfq_mark_bfqq_budget_new(bfqq); + bfq_clear_bfqq_fifo_expire(bfqq); + + bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; + + bfq_log_bfqq(bfqd, bfqq, + "set_in_service_queue, cur-budget = %lu", + bfqq->entity.budget); + } + + bfqd->in_service_queue = bfqq; +} + +/* + * Get and set a new queue for service. + */ +static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); + + __bfq_set_in_service_queue(bfqd, bfqq); + return bfqq; +} + +/* + * If enough samples have been computed, return the current max budget + * stored in bfqd, which is dynamically updated according to the + * estimated disk peak rate; otherwise return the default max budget + */ +static inline unsigned long bfq_max_budget(struct bfq_data *bfqd) +{ + if (bfqd->budgets_assigned < 194) + return bfq_default_max_budget; + else + return bfqd->bfq_max_budget; +} + +/* + * Return min budget, which is a fraction of the current or default + * max budget (trying with 1/32) + */ +static inline unsigned long bfq_min_budget(struct bfq_data *bfqd) +{ + if (bfqd->budgets_assigned < 194) + return bfq_default_max_budget / 32; + else + return bfqd->bfq_max_budget / 32; +} + +static void bfq_arm_slice_timer(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfqd->in_service_queue; + struct bfq_io_cq *bic; + unsigned long sl; + + BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + + /* Processes have exited, don't wait. */ + bic = bfqd->in_service_bic; + if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0) + return; + + bfq_mark_bfqq_wait_request(bfqq); + + /* + * We don't want to idle for seeks, but we do want to allow + * fair distribution of slice time for a process doing back-to-back + * seeks. So allow a little bit of time for him to submit a new rq. + * + * To prevent processes with (partly) seeky workloads from + * being too ill-treated, grant them a small fraction of the + * assigned budget before reducing the waiting time to + * BFQ_MIN_TT. This happened to help reduce latency. + */ + sl = bfqd->bfq_slice_idle; + /* + * Unless the queue is being weight-raised or the scenario is + * asymmetric, grant only minimum idle time if the queue either + * has been seeky for long enough or has already proved to be + * constantly seeky. + */ + if (bfq_sample_valid(bfqq->seek_samples) && + ((BFQQ_SEEKY(bfqq) && bfqq->entity.service > + bfq_max_budget(bfqq->bfqd) / 8) || + bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1 && + symmetric_scenario) + sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); + else if (bfqq->wr_coeff > 1) + sl = sl * 3; + bfqd->last_idling_start = ktime_get(); + mod_timer(&bfqd->idle_slice_timer, jiffies + sl); + bfq_log(bfqd, "arm idle: %u/%u ms", + jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle)); +} + +/* + * Set the maximum time for the in-service queue to consume its + * budget. This prevents seeky processes from lowering the disk + * throughput (always guaranteed with a time slice scheme as in CFQ). 
+ */ +static void bfq_set_budget_timeout(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfqd->in_service_queue; + unsigned int timeout_coeff; + if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) + timeout_coeff = 1; + else + timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; + + bfqd->last_budget_start = ktime_get(); + + bfq_clear_bfqq_budget_new(bfqq); + bfqq->budget_timeout = jiffies + + bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff; + + bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", + jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * + timeout_coeff)); +} + +/* + * Move request from internal lists to the request queue dispatch list. + */ +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + /* + * For consistency, the next instruction should have been executed + * after removing the request from the queue and dispatching it. + * We execute instead this instruction before bfq_remove_request() + * (and hence introduce a temporary inconsistency), for efficiency. + * In fact, in a forced_dispatch, this prevents two counters related + * to bfqq->dispatched to risk to be uselessly decremented if bfqq + * is not in service, and then to be incremented again after + * incrementing bfqq->dispatched. + */ + bfqq->dispatched++; + bfq_remove_request(rq); + elv_dispatch_sort(q, rq); + + if (bfq_bfqq_sync(bfqq)) + bfqd->sync_flight++; +} + +/* + * Return expired entry, or NULL to just start from scratch in rbtree. + */ +static struct request *bfq_check_fifo(struct bfq_queue *bfqq) +{ + struct request *rq = NULL; + + if (bfq_bfqq_fifo_expire(bfqq)) + return NULL; + + bfq_mark_bfqq_fifo_expire(bfqq); + + if (list_empty(&bfqq->fifo)) + return NULL; + + rq = rq_entry_fifo(bfqq->fifo.next); + + if (time_before(jiffies, rq->fifo_time)) + return NULL; + + return rq; +} + +static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + return entity->budget - entity->service; +} + +static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + BUG_ON(bfqq != bfqd->in_service_queue); + + __bfq_bfqd_reset_in_service(bfqd); + + /* + * If this bfqq is shared between multiple processes, check + * to make sure that those processes are still issuing I/Os + * within the mean seek distance. If not, it may be time to + * break the queues apart again. + */ + if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) + bfq_mark_bfqq_split_coop(bfqq); + + if (RB_EMPTY_ROOT(&bfqq->sort_list)) { + /* + * Overloading budget_timeout field to store the time + * at which the queue remains with no backlog; used by + * the weight-raising mechanism. + */ + bfqq->budget_timeout = jiffies; + bfq_del_bfqq_busy(bfqd, bfqq, 1); + } else { + bfq_activate_bfqq(bfqd, bfqq); + /* + * Resort priority tree of potential close cooperators. + */ + bfq_rq_pos_tree_add(bfqd, bfqq); + } +} + +/** + * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. + * @bfqd: device data. + * @bfqq: queue to update. + * @reason: reason for expiration. + * + * Handle the feedback on @bfqq budget. See the body for detailed + * comments. 
+ */ +static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + enum bfqq_expiration reason) +{ + struct request *next_rq; + unsigned long budget, min_budget; + + budget = bfqq->max_budget; + min_budget = bfq_min_budget(bfqd); + + BUG_ON(bfqq != bfqd->in_service_queue); + + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu", + bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu", + budget, bfq_min_budget(bfqd)); + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", + bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); + + if (bfq_bfqq_sync(bfqq)) { + switch (reason) { + /* + * Caveat: in all the following cases we trade latency + * for throughput. + */ + case BFQ_BFQQ_TOO_IDLE: + /* + * This is the only case where we may reduce + * the budget: if there is no request of the + * process still waiting for completion, then + * we assume (tentatively) that the timer has + * expired because the batch of requests of + * the process could have been served with a + * smaller budget. Hence, betting that + * process will behave in the same way when it + * becomes backlogged again, we reduce its + * next budget. As long as we guess right, + * this budget cut reduces the latency + * experienced by the process. + * + * However, if there are still outstanding + * requests, then the process may have not yet + * issued its next request just because it is + * still waiting for the completion of some of + * the still outstanding ones. So in this + * subcase we do not reduce its budget, on the + * contrary we increase it to possibly boost + * the throughput, as discussed in the + * comments to the BUDGET_TIMEOUT case. + */ + if (bfqq->dispatched > 0) /* still outstanding reqs */ + budget = min(budget * 2, bfqd->bfq_max_budget); + else { + if (budget > 5 * min_budget) + budget -= 4 * min_budget; + else + budget = min_budget; + } + break; + case BFQ_BFQQ_BUDGET_TIMEOUT: + /* + * We double the budget here because: 1) it + * gives the chance to boost the throughput if + * this is not a seeky process (which may have + * bumped into this timeout because of, e.g., + * ZBR), 2) together with charge_full_budget + * it helps give seeky processes higher + * timestamps, and hence be served less + * frequently. + */ + budget = min(budget * 2, bfqd->bfq_max_budget); + break; + case BFQ_BFQQ_BUDGET_EXHAUSTED: + /* + * The process still has backlog, and did not + * let either the budget timeout or the disk + * idling timeout expire. Hence it is not + * seeky, has a short thinktime and may be + * happy with a higher budget too. So + * definitely increase the budget of this good + * candidate to boost the disk throughput. + */ + budget = min(budget * 4, bfqd->bfq_max_budget); + break; + case BFQ_BFQQ_NO_MORE_REQUESTS: + /* + * Leave the budget unchanged. + */ + default: + return; + } + } else /* async queue */ + /* async queues get always the maximum possible budget + * (their ability to dispatch is limited by + * @bfqd->bfq_max_budget_async_rq). + */ + budget = bfqd->bfq_max_budget; + + bfqq->max_budget = budget; + + if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 && + bfqq->max_budget > bfqd->bfq_max_budget) + bfqq->max_budget = bfqd->bfq_max_budget; + + /* + * Make sure that we have enough budget for the next request. + * Since the finish time of the bfqq must be kept in sync with + * the budget, be sure to call __bfq_bfqq_expire() after the + * update. 
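The budget feedback rules in the switch above reduce, for sync queues, to the small stand-alone model below (not part of the patch; async queues, which always get the maximum budget, and the user-configured cap are omitted):

#include <stdio.h>

enum expiration { TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long next_budget(unsigned long budget, enum expiration reason,
				 int dispatched, unsigned long max_budget,
				 unsigned long min_budget)
{
	switch (reason) {
	case TOO_IDLE:
		if (dispatched > 0)          /* still outstanding requests */
			return min_ul(budget * 2, max_budget);
		return budget > 5 * min_budget ? budget - 4 * min_budget
					       : min_budget;
	case BUDGET_TIMEOUT:
		return min_ul(budget * 2, max_budget);
	case BUDGET_EXHAUSTED:
		return min_ul(budget * 4, max_budget);
	}
	return budget;                       /* e.g. NO_MORE_REQUESTS: unchanged */
}

int main(void)
{
	unsigned long max = 16384, min = max / 32;

	printf("too idle, no reqs : %lu\n", next_budget(4096, TOO_IDLE, 0, max, min));
	printf("budget timeout    : %lu\n", next_budget(4096, BUDGET_TIMEOUT, 0, max, min));
	printf("budget exhausted  : %lu\n", next_budget(4096, BUDGET_EXHAUSTED, 0, max, min));
	return 0;
}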
+ */ + next_rq = bfqq->next_rq; + if (next_rq != NULL) + bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + else + bfqq->entity.budget = bfqq->max_budget; + + bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu", + next_rq != NULL ? blk_rq_sectors(next_rq) : 0, + bfqq->entity.budget); +} + +static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout) +{ + unsigned long max_budget; + + /* + * The max_budget calculated when autotuning is equal to the + * amount of sectors transfered in timeout_sync at the + * estimated peak rate. + */ + max_budget = (unsigned long)(peak_rate * 1000 * + timeout >> BFQ_RATE_SHIFT); + + return max_budget; +} + +/* + * In addition to updating the peak rate, checks whether the process + * is "slow", and returns 1 if so. This slow flag is used, in addition + * to the budget timeout, to reduce the amount of service provided to + * seeky processes, and hence reduce their chances to lower the + * throughput. See the code for more details. + */ +static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int compensate, enum bfqq_expiration reason) +{ + u64 bw, usecs, expected, timeout; + ktime_t delta; + int update = 0; + + if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq)) + return 0; + + if (compensate) + delta = bfqd->last_idling_start; + else + delta = ktime_get(); + delta = ktime_sub(delta, bfqd->last_budget_start); + usecs = ktime_to_us(delta); + + /* Don't trust short/unrealistic values. */ + if (usecs < 100 || usecs >= LONG_MAX) + return 0; + + /* + * Calculate the bandwidth for the last slice. We use a 64 bit + * value to store the peak rate, in sectors per usec in fixed + * point math. We do so to have enough precision in the estimate + * and to avoid overflows. + */ + bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT; + do_div(bw, (unsigned long)usecs); + + timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); + + /* + * Use only long (> 20ms) intervals to filter out spikes for + * the peak rate estimation. 
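The peak-rate update performed just below, on slices longer than 20 ms, combines a fixed-point bandwidth sample with a 7/8 low-pass filter. A stand-alone sketch (not part of the patch; RATE_SHIFT is an assumed stand-in for BFQ_RATE_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define RATE_SHIFT 16   /* assumed fixed-point shift */

/* Bandwidth sample in sectors/usec, fixed point. */
static uint64_t rate_sample(uint64_t sectors, uint64_t usecs)
{
	return (sectors << RATE_SHIFT) / usecs;
}

/* new_rate = (7/8) * old_rate + (1/8) * sample, as in the code below. */
static uint64_t lowpass(uint64_t old_rate, uint64_t sample)
{
	return (old_rate * 7) / 8 + sample / 8;
}

int main(void)
{
	uint64_t rate = rate_sample(2048, 25000);   /* first long slice */

	rate = lowpass(rate, rate_sample(4096, 30000));
	printf("estimated peak rate (fixed point): %llu\n",
	       (unsigned long long)rate);
	return 0;
}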
+ */ + if (usecs > 20000) { + if (bw > bfqd->peak_rate || + (!BFQQ_SEEKY(bfqq) && + reason == BFQ_BFQQ_BUDGET_TIMEOUT)) { + bfq_log(bfqd, "measured bw =%llu", bw); + /* + * To smooth oscillations use a low-pass filter with + * alpha=7/8, i.e., + * new_rate = (7/8) * old_rate + (1/8) * bw + */ + do_div(bw, 8); + if (bw == 0) + return 0; + bfqd->peak_rate *= 7; + do_div(bfqd->peak_rate, 8); + bfqd->peak_rate += bw; + update = 1; + bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate); + } + + update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1; + + if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES) + bfqd->peak_rate_samples++; + + if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES && + update) { + int dev_type = blk_queue_nonrot(bfqd->queue); + if (bfqd->bfq_user_max_budget == 0) { + bfqd->bfq_max_budget = + bfq_calc_max_budget(bfqd->peak_rate, + timeout); + bfq_log(bfqd, "new max_budget=%lu", + bfqd->bfq_max_budget); + } + if (bfqd->device_speed == BFQ_BFQD_FAST && + bfqd->peak_rate < device_speed_thresh[dev_type]) { + bfqd->device_speed = BFQ_BFQD_SLOW; + bfqd->RT_prod = R_slow[dev_type] * + T_slow[dev_type]; + } else if (bfqd->device_speed == BFQ_BFQD_SLOW && + bfqd->peak_rate > device_speed_thresh[dev_type]) { + bfqd->device_speed = BFQ_BFQD_FAST; + bfqd->RT_prod = R_fast[dev_type] * + T_fast[dev_type]; + } + } + } + + /* + * If the process has been served for a too short time + * interval to let its possible sequential accesses prevail on + * the initial seek time needed to move the disk head on the + * first sector it requested, then give the process a chance + * and for the moment return false. + */ + if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8) + return 0; + + /* + * A process is considered ``slow'' (i.e., seeky, so that we + * cannot treat it fairly in the service domain, as it would + * slow down too much the other processes) if, when a slice + * ends for whatever reason, it has received service at a + * rate that would not be high enough to complete the budget + * before the budget timeout expiration. + */ + expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT; + + /* + * Caveat: processes doing IO in the slower disk zones will + * tend to be slow(er) even if not seeky. And the estimated + * peak rate will actually be an average over the disk + * surface. Hence, to not be too harsh with unlucky processes, + * we keep a budget/3 margin of safety before declaring a + * process slow. + */ + return expected > (4 * bfqq->entity.budget) / 3; +} + +/* + * To be deemed as soft real-time, an application must meet two + * requirements. First, the application must not require an average + * bandwidth higher than the approximate bandwidth required to playback or + * record a compressed high-definition video. + * The next function is invoked on the completion of the last request of a + * batch, to compute the next-start time instant, soft_rt_next_start, such + * that, if the next request of the application does not arrive before + * soft_rt_next_start, then the above requirement on the bandwidth is met. + * + * The second requirement is that the request pattern of the application is + * isochronous, i.e., that, after issuing a request or a batch of requests, + * the application stops issuing new requests until all its pending requests + * have been completed. After that, the application may issue a new batch, + * and so on. 
+ * For this reason the next function is invoked to compute + * soft_rt_next_start only for applications that meet this requirement, + * whereas soft_rt_next_start is set to infinity for applications that do + * not. + * + * Unfortunately, even a greedy application may happen to behave in an + * isochronous way if the CPU load is high. In fact, the application may + * stop issuing requests while the CPUs are busy serving other processes, + * then restart, then stop again for a while, and so on. In addition, if + * the disk achieves a low enough throughput with the request pattern + * issued by the application (e.g., because the request pattern is random + * and/or the device is slow), then the application may meet the above + * bandwidth requirement too. To prevent such a greedy application to be + * deemed as soft real-time, a further rule is used in the computation of + * soft_rt_next_start: soft_rt_next_start must be higher than the current + * time plus the maximum time for which the arrival of a request is waited + * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle. + * This filters out greedy applications, as the latter issue instead their + * next request as soon as possible after the last one has been completed + * (in contrast, when a batch of requests is completed, a soft real-time + * application spends some time processing data). + * + * Unfortunately, the last filter may easily generate false positives if + * only bfqd->bfq_slice_idle is used as a reference time interval and one + * or both the following cases occur: + * 1) HZ is so low that the duration of a jiffy is comparable to or higher + * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with + * HZ=100. + * 2) jiffies, instead of increasing at a constant rate, may stop increasing + * for a while, then suddenly 'jump' by several units to recover the lost + * increments. This seems to happen, e.g., inside virtual machines. + * To address this issue, we do not use as a reference time interval just + * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In + * particular we add the minimum number of jiffies for which the filter + * seems to be quite precise also in embedded systems and KVM/QEMU virtual + * machines. + */ +static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + return max(bfqq->last_idle_bklogged + + HZ * bfqq->service_from_backlogged / + bfqd->bfq_wr_max_softrt_rate, + jiffies + bfqq->bfqd->bfq_slice_idle + 4); +} + +/* + * Return the largest-possible time instant such that, for as long as possible, + * the current time will be lower than this time instant according to the macro + * time_is_before_jiffies(). + */ +static inline unsigned long bfq_infinity_from_now(unsigned long now) +{ + return now + ULONG_MAX / 2; +} + +/** + * bfq_bfqq_expire - expire a queue. + * @bfqd: device owning the queue. + * @bfqq: the queue to expire. + * @compensate: if true, compensate for the time spent idling. + * @reason: the reason causing the expiration. + * + * + * If the process associated to the queue is slow (i.e., seeky), or in + * case of budget timeout, or, finally, if it is async, we + * artificially charge it an entire budget (independently of the + * actual service it received). As a consequence, the queue will get + * higher timestamps than the correct ones upon reactivation, and + * hence it will be rescheduled as if it had received more service + * than what it actually received. 
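The computation done by bfq_bfqq_softrt_next_start() above can be modeled in user space as follows (not part of the patch; HZ, the idle slice and the rate are assumed values): the next request must not arrive before the service received while backlogged has been "consumed" at the maximum soft-rt rate, and in any case not before a bit more than one idle slice from now.

#include <stdio.h>

#define HZ 250   /* assumed tick rate */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long softrt_next_start(unsigned long last_idle_bklogged,
				       unsigned long service_sectors,
				       unsigned long max_softrt_rate,
				       unsigned long now,
				       unsigned long slice_idle)
{
	/* Same shape as the max() above: bandwidth bound vs. greedy filter. */
	return max_ul(last_idle_bklogged + HZ * service_sectors / max_softrt_rate,
		      now + slice_idle + 4);
}

int main(void)
{
	/* 7000 sectors served, 7000 sectors/sec allowed -> one second (HZ jiffies). */
	printf("next start: %lu (now = 1000)\n",
	       softrt_next_start(900, 7000, 7000, 1000, 8));
	return 0;
}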
In the end, this class of processes + * will receive less service in proportion to how slowly they consume + * their budgets (and hence how seriously they tend to lower the + * throughput). + * + * In contrast, when a queue expires because it has been idling for + * too much or because it exhausted its budget, we do not touch the + * amount of service it has received. Hence when the queue will be + * reactivated and its timestamps updated, the latter will be in sync + * with the actual service received by the queue until expiration. + * + * Charging a full budget to the first type of queues and the exact + * service to the others has the effect of using the WF2Q+ policy to + * schedule the former on a timeslice basis, without violating the + * service domain guarantees of the latter. + */ +static void bfq_bfqq_expire(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + int compensate, + enum bfqq_expiration reason) +{ + int slow; + BUG_ON(bfqq != bfqd->in_service_queue); + + /* Update disk peak rate for autotuning and check whether the + * process is slow (see bfq_update_peak_rate). + */ + slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason); + + /* + * As above explained, 'punish' slow (i.e., seeky), timed-out + * and async queues, to favor sequential sync workloads. + * + * Processes doing I/O in the slower disk zones will tend to be + * slow(er) even if not seeky. Hence, since the estimated peak + * rate is actually an average over the disk surface, these + * processes may timeout just for bad luck. To avoid punishing + * them we do not charge a full budget to a process that + * succeeded in consuming at least 2/3 of its budget. + */ + if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT && + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)) + bfq_bfqq_charge_full_budget(bfqq); + + bfqq->service_from_backlogged += bfqq->entity.service; + + if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT && + !bfq_bfqq_constantly_seeky(bfqq)) { + bfq_mark_bfqq_constantly_seeky(bfqq); + if (!blk_queue_nonrot(bfqd->queue)) + bfqd->const_seeky_busy_in_flight_queues++; + } + + if (reason == BFQ_BFQQ_TOO_IDLE && + bfqq->entity.service <= 2 * bfqq->entity.budget / 10 ) + bfq_clear_bfqq_IO_bound(bfqq); + + if (bfqd->low_latency && bfqq->wr_coeff == 1) + bfqq->last_wr_start_finish = jiffies; + + if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && + RB_EMPTY_ROOT(&bfqq->sort_list)) { + /* + * If we get here, and there are no outstanding requests, + * then the request pattern is isochronous (see the comments + * to the function bfq_bfqq_softrt_next_start()). Hence we + * can compute soft_rt_next_start. If, instead, the queue + * still has outstanding requests, then we have to wait + * for the completion of all the outstanding requests to + * discover whether the request pattern is actually + * isochronous. + */ + if (bfqq->dispatched == 0) + bfqq->soft_rt_next_start = + bfq_bfqq_softrt_next_start(bfqd, bfqq); + else { + /* + * The application is still waiting for the + * completion of one or more requests: + * prevent it from possibly being incorrectly + * deemed as soft real-time by setting its + * soft_rt_next_start to infinity. In fact, + * without this assignment, the application + * would be incorrectly deemed as soft + * real-time if: + * 1) it issued a new request before the + * completion of all its in-flight + * requests, and + * 2) at that time, its soft_rt_next_start + * happened to be in the past. 
+ */ + bfqq->soft_rt_next_start = + bfq_infinity_from_now(jiffies); + /* + * Schedule an update of soft_rt_next_start to when + * the task may be discovered to be isochronous. + */ + bfq_mark_bfqq_softrt_update(bfqq); + } + } + + bfq_log_bfqq(bfqd, bfqq, + "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, + slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq)); + + /* + * Increase, decrease or leave budget unchanged according to + * reason. + */ + __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); + __bfq_bfqq_expire(bfqd, bfqq); +} + +/* + * Budget timeout is not implemented through a dedicated timer, but + * just checked on request arrivals and completions, as well as on + * idle timer expirations. + */ +static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) +{ + if (bfq_bfqq_budget_new(bfqq) || + time_before(jiffies, bfqq->budget_timeout)) + return 0; + return 1; +} + +/* + * If we expire a queue that is waiting for the arrival of a new + * request, we may prevent the fictitious timestamp back-shifting that + * allows the guarantees of the queue to be preserved (see [1] for + * this tricky aspect). Hence we return true only if this condition + * does not hold, or if the queue is slow enough to deserve only to be + * kicked off for preserving a high throughput. +*/ +static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) +{ + bfq_log_bfqq(bfqq->bfqd, bfqq, + "may_budget_timeout: wait_request %d left %d timeout %d", + bfq_bfqq_wait_request(bfqq), + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, + bfq_bfqq_budget_timeout(bfqq)); + + return (!bfq_bfqq_wait_request(bfqq) || + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) + && + bfq_bfqq_budget_timeout(bfqq); +} + +/* + * Device idling is allowed only for the queues for which this function + * returns true. For this reason, the return value of this function plays a + * critical role for both throughput boosting and service guarantees. The + * return value is computed through a logical expression. In this rather + * long comment, we try to briefly describe all the details and motivations + * behind the components of this logical expression. + * + * First, the expression is false if bfqq is not sync, or if: bfqq happened + * to become active during a large burst of queue activations, and the + * pattern of requests bfqq contains boosts the throughput if bfqq is + * expired. In fact, queues that became active during a large burst benefit + * only from throughput, as discussed in the comments to bfq_handle_burst. + * In this respect, expiring bfqq certainly boosts the throughput on NCQ- + * capable flash-based devices, whereas, on rotational devices, it boosts + * the throughput only if bfqq contains random requests. + * + * On the opposite end, if (a) bfqq is sync, (b) the above burst-related + * condition does not hold, and (c) bfqq is being weight-raised, then the + * expression always evaluates to true, as device idling is instrumental + * for preserving low-latency guarantees (see [1]). If, instead, conditions + * (a) and (b) do hold, but (c) does not, then the expression evaluates to + * true only if: (1) bfqq is I/O-bound and has a non-null idle window, and + * (2) at least one of the following two conditions holds. + * The first condition is that the device is not performing NCQ, because + * idling the device most certainly boosts the throughput if this condition + * holds and bfqq is I/O-bound and has been granted a non-null idle window. 
+ * The second compound condition is made of the logical AND of two components. + * + * The first component is true only if there is no weight-raised busy + * queue. This guarantees that the device is not idled for a sync non- + * weight-raised queue when there are busy weight-raised queues. The former + * is then expired immediately if empty. Combined with the timestamping + * rules of BFQ (see [1] for details), this causes sync non-weight-raised + * queues to get a lower number of requests served, and hence to ask for a + * lower number of requests from the request pool, before the busy weight- + * raised queues get served again. + * + * This is beneficial for the processes associated with weight-raised + * queues, when the request pool is saturated (e.g., in the presence of + * write hogs). In fact, if the processes associated with the other queues + * ask for requests at a lower rate, then weight-raised processes have a + * higher probability to get a request from the pool immediately (or at + * least soon) when they need one. Hence they have a higher probability to + * actually get a fraction of the disk throughput proportional to their + * high weight. This is especially true with NCQ-capable drives, which + * enqueue several requests in advance and further reorder internally- + * queued requests. + * + * In the end, mistreating non-weight-raised queues when there are busy + * weight-raised queues seems to mitigate starvation problems in the + * presence of heavy write workloads and NCQ, and hence to guarantee a + * higher application and system responsiveness in these hostile scenarios. + * + * If the first component of the compound condition is instead true, i.e., + * there is no weight-raised busy queue, then the second component of the + * compound condition takes into account service-guarantee and throughput + * issues related to NCQ (recall that the compound condition is evaluated + * only if the device is detected as supporting NCQ). + * + * As for service guarantees, allowing the drive to enqueue more than one + * request at a time, and hence delegating de facto final scheduling + * decisions to the drive's internal scheduler, causes loss of control on + * the actual request service order. In this respect, when the drive is + * allowed to enqueue more than one request at a time, the service + * distribution enforced by the drive's internal scheduler is likely to + * coincide with the desired device-throughput distribution only in the + * following, perfectly symmetric, scenario: + * 1) all active queues have the same weight, + * 2) all active groups at the same level in the groups tree have the same + * weight, + * 3) all active groups at the same level in the groups tree have the same + * number of children. + * + * Even in such a scenario, sequential I/O may still receive a preferential + * treatment, but this is not likely to be a big issue with flash-based + * devices, because of their non-dramatic loss of throughput with random + * I/O. Things do differ with HDDs, for which additional care is taken, as + * explained after completing the discussion for flash-based devices. + * + * Unfortunately, keeping the necessary state for evaluating exactly the + * above symmetry conditions would be quite complex and time-consuming. 
+ * Therefore BFQ evaluates instead the following stronger sub-conditions, + * for which it is much easier to maintain the needed state: + * 1) all active queues have the same weight, + * 2) all active groups have the same weight, + * 3) all active groups have at most one active child each. + * In particular, the last two conditions are always true if hierarchical + * support and the cgroups interface are not enabled, hence no state needs + * to be maintained in this case. + * + * According to the above considerations, the second component of the + * compound condition evaluates to true if any of the above symmetry + * sub-condition does not hold, or the device is not flash-based. Therefore, + * if also the first component is true, then idling is allowed for a sync + * queue. These are the only sub-conditions considered if the device is + * flash-based, as, for such a device, it is sensible to force idling only + * for service-guarantee issues. In fact, as for throughput, idling + * NCQ-capable flash-based devices would not boost the throughput even + * with sequential I/O; rather it would lower the throughput in proportion + * to how fast the device is. In the end, (only) if all the three + * sub-conditions hold and the device is flash-based, the compound + * condition evaluates to false and therefore no idling is performed. + * + * As already said, things change with a rotational device, where idling + * boosts the throughput with sequential I/O (even with NCQ). Hence, for + * such a device the second component of the compound condition evaluates + * to true also if the following additional sub-condition does not hold: + * the queue is constantly seeky. Unfortunately, this different behavior + * with respect to flash-based devices causes an additional asymmetry: if + * some sync queues enjoy idling and some other sync queues do not, then + * the latter get a low share of the device throughput, simply because the + * former get many requests served after being set as in service, whereas + * the latter do not. As a consequence, to guarantee the desired throughput + * distribution, on HDDs the compound expression evaluates to true (and + * hence device idling is performed) also if the following last symmetry + * condition does not hold: no other queue is benefiting from idling. Also + * this last condition is actually replaced with a simpler-to-maintain and + * stronger condition: there is no busy queue which is not constantly seeky + * (and hence may also benefit from idling). + * + * To sum up, when all the required symmetry and throughput-boosting + * sub-conditions hold, the second component of the compound condition + * evaluates to false, and hence no idling is performed. This helps to + * keep the drives' internal queues full on NCQ-capable devices, and hence + * to boost the throughput, without causing 'almost' any loss of service + * guarantees. The 'almost' follows from the fact that, if the internal + * queue of one such device is filled while all the sub-conditions hold, + * but at some point in time some sub-condition stops to hold, then it may + * become impossible to let requests be served in the new desired order + * until all the requests already queued in the device have been served. 
+ */ +static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; +#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \ + bfqd->busy_in_flight_queues == \ + bfqd->const_seeky_busy_in_flight_queues) + +#define cond_for_expiring_in_burst (bfq_bfqq_in_large_burst(bfqq) && \ + bfqd->hw_tag && \ + (blk_queue_nonrot(bfqd->queue) || \ + bfq_bfqq_constantly_seeky(bfqq))) + +/* + * Condition for expiring a non-weight-raised queue (and hence not idling + * the device). + */ +#define cond_for_expiring_non_wr (bfqd->hw_tag && \ + (bfqd->wr_busy_queues > 0 || \ + (blk_queue_nonrot(bfqd->queue) || \ + cond_for_seeky_on_ncq_hdd))) + + return bfq_bfqq_sync(bfqq) && + !cond_for_expiring_in_burst && + (bfqq->wr_coeff > 1 || !symmetric_scenario || + (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) && + !cond_for_expiring_non_wr) + ); +} + +/* + * If the in-service queue is empty but sync, and the function + * bfq_bfqq_must_not_expire returns true, then: + * 1) the queue must remain in service and cannot be expired, and + * 2) the disk must be idled to wait for the possible arrival of a new + * request for the queue. + * See the comments to the function bfq_bfqq_must_not_expire for the reasons + * why performing device idling is the best choice to boost the throughput + * and preserve service guarantees when bfq_bfqq_must_not_expire itself + * returns true. + */ +static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; + + return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 && + bfq_bfqq_must_not_expire(bfqq); +} + +/* + * Select a queue for service. If we have a current queue in service, + * check whether to continue servicing it, or retrieve and set a new one. + */ +static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq; + struct request *next_rq; + enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; + + bfqq = bfqd->in_service_queue; + if (bfqq == NULL) + goto new_queue; + + bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); + + if (bfq_may_expire_for_budg_timeout(bfqq) && + !timer_pending(&bfqd->idle_slice_timer) && + !bfq_bfqq_must_idle(bfqq)) + goto expire; + + next_rq = bfqq->next_rq; + /* + * If bfqq has requests queued and it has enough budget left to + * serve them, keep the queue, otherwise expire it. + */ + if (next_rq != NULL) { + if (bfq_serv_to_charge(next_rq, bfqq) > + bfq_bfqq_budget_left(bfqq)) { + reason = BFQ_BFQQ_BUDGET_EXHAUSTED; + goto expire; + } else { + /* + * The idle timer may be pending because we may + * not disable disk idling even when a new request + * arrives. + */ + if (timer_pending(&bfqd->idle_slice_timer)) { + /* + * If we get here: 1) at least a new request + * has arrived but we have not disabled the + * timer because the request was too small, + * 2) then the block layer has unplugged + * the device, causing the dispatch to be + * invoked. + * + * Since the device is unplugged, now the + * requests are probably large enough to + * provide a reasonable throughput. + * So we disable idling. + */ + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + } + goto keep_queue; + } + } + + /* + * No requests pending. However, if the in-service queue is idling + * for a new request, or has requests waiting for a completion and + * may idle after their completion, then keep it anyway. 
+ */ + if (timer_pending(&bfqd->idle_slice_timer) || + (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) { + bfqq = NULL; + goto keep_queue; + } + + reason = BFQ_BFQQ_NO_MORE_REQUESTS; +expire: + bfq_bfqq_expire(bfqd, bfqq, 0, reason); +new_queue: + bfqq = bfq_set_in_service_queue(bfqd); + bfq_log(bfqd, "select_queue: new queue %d returned", + bfqq != NULL ? bfqq->pid : 0); +keep_queue: + return bfqq; +} + +static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ + bfq_log_bfqq(bfqd, bfqq, + "raising period dur %u/%u msec, old coeff %u, w %d(%d)", + jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), + jiffies_to_msecs(bfqq->wr_cur_max_time), + bfqq->wr_coeff, + bfqq->entity.weight, bfqq->entity.orig_weight); + + BUG_ON(bfqq != bfqd->in_service_queue && entity->weight != + entity->orig_weight * bfqq->wr_coeff); + if (entity->ioprio_changed) + bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); + + /* + * If the queue was activated in a burst, or + * too much time has elapsed from the beginning + * of this weight-raising period, or the queue has + * exceeded the acceptable number of cooperations, + * then end weight raising. + */ + if (bfq_bfqq_in_large_burst(bfqq) || + bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh || + time_is_before_jiffies(bfqq->last_wr_start_finish + + bfqq->wr_cur_max_time)) { + bfqq->last_wr_start_finish = jiffies; + bfq_log_bfqq(bfqd, bfqq, + "wrais ending at %lu, rais_max_time %u", + bfqq->last_wr_start_finish, + jiffies_to_msecs(bfqq->wr_cur_max_time)); + bfq_bfqq_end_wr(bfqq); + } + } + /* Update weight both if it must be raised and if it must be lowered */ + if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) + __bfq_entity_update_weight_prio( + bfq_entity_service_tree(entity), + entity); +} + +/* + * Dispatch one request from bfqq, moving it to the request queue + * dispatch list. + */ +static int bfq_dispatch_request(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + int dispatched = 0; + struct request *rq; + unsigned long service_to_charge; + + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); + + /* Follow expired path, else get first next available. */ + rq = bfq_check_fifo(bfqq); + if (rq == NULL) + rq = bfqq->next_rq; + service_to_charge = bfq_serv_to_charge(rq, bfqq); + + if (service_to_charge > bfq_bfqq_budget_left(bfqq)) { + /* + * This may happen if the next rq is chosen in fifo order + * instead of sector order. The budget is properly + * dimensioned to be always sufficient to serve the next + * request only if it is chosen in sector order. The reason + * is that it would be quite inefficient and little useful + * to always make sure that the budget is large enough to + * serve even the possible next rq in fifo order. + * In fact, requests are seldom served in fifo order. + * + * Expire the queue for budget exhaustion, and make sure + * that the next act_budget is enough to serve the next + * request, even if it comes from the fifo expired path. + */ + bfqq->next_rq = rq; + /* + * Since this dispatch is failed, make sure that + * a new one will be performed + */ + if (!bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); + goto expire; + } + + /* Finally, insert request into driver dispatch list. 
*/ + bfq_bfqq_served(bfqq, service_to_charge); + bfq_dispatch_insert(bfqd->queue, rq); + + bfq_update_wr_data(bfqd, bfqq); + + bfq_log_bfqq(bfqd, bfqq, + "dispatched %u sec req (%llu), budg left %lu", + blk_rq_sectors(rq), + (long long unsigned)blk_rq_pos(rq), + bfq_bfqq_budget_left(bfqq)); + + dispatched++; + + if (bfqd->in_service_bic == NULL) { + atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); + bfqd->in_service_bic = RQ_BIC(rq); + } + + if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) && + dispatched >= bfqd->bfq_max_budget_async_rq) || + bfq_class_idle(bfqq))) + goto expire; + + return dispatched; + +expire: + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED); + return dispatched; +} + +static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) +{ + int dispatched = 0; + + while (bfqq->next_rq != NULL) { + bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); + dispatched++; + } + + BUG_ON(!list_empty(&bfqq->fifo)); + return dispatched; +} + +/* + * Drain our current requests. + * Used for barriers and when switching io schedulers on-the-fly. + */ +static int bfq_forced_dispatch(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq, *n; + struct bfq_service_tree *st; + int dispatched = 0; + + bfqq = bfqd->in_service_queue; + if (bfqq != NULL) + __bfq_bfqq_expire(bfqd, bfqq); + + /* + * Loop through classes, and be careful to leave the scheduler + * in a consistent state, as feedback mechanisms and vtime + * updates cannot be disabled during the process. + */ + list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { + st = bfq_entity_service_tree(&bfqq->entity); + + dispatched += __bfq_forced_dispatch_bfqq(bfqq); + bfqq->max_budget = bfq_max_budget(bfqd); + + bfq_forget_idle(st); + } + + BUG_ON(bfqd->busy_queues != 0); + + return dispatched; +} + +static int bfq_dispatch_requests(struct request_queue *q, int force) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq; + int max_dispatch; + + bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); + if (bfqd->busy_queues == 0) + return 0; + + if (unlikely(force)) + return bfq_forced_dispatch(bfqd); + + bfqq = bfq_select_queue(bfqd); + if (bfqq == NULL) + return 0; + + if (bfq_class_idle(bfqq)) + max_dispatch = 1; + + if (!bfq_bfqq_sync(bfqq)) + max_dispatch = bfqd->bfq_max_budget_async_rq; + + if (!bfq_bfqq_sync(bfqq) && bfqq->dispatched >= max_dispatch) { + if (bfqd->busy_queues > 1) + return 0; + if (bfqq->dispatched >= 4 * max_dispatch) + return 0; + } + + if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq)) + return 0; + + bfq_clear_bfqq_wait_request(bfqq); + BUG_ON(timer_pending(&bfqd->idle_slice_timer)); + + if (!bfq_dispatch_request(bfqd, bfqq)) + return 0; + + bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", + bfq_bfqq_sync(bfqq) ? "sync" : "async"); + + return 1; +} + +/* + * Task holds one reference to the queue, dropped when task exits. Each rq + * in-flight on this queue also holds a reference, dropped when rq is freed. + * + * Queue lock must be held here. 
+ */ +static void bfq_put_queue(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; + + BUG_ON(atomic_read(&bfqq->ref) <= 0); + + bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq, + atomic_read(&bfqq->ref)); + if (!atomic_dec_and_test(&bfqq->ref)) + return; + + BUG_ON(rb_first(&bfqq->sort_list) != NULL); + BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); + BUG_ON(bfqq->entity.tree != NULL); + BUG_ON(bfq_bfqq_busy(bfqq)); + BUG_ON(bfqd->in_service_queue == bfqq); + + if (bfq_bfqq_sync(bfqq)) + /* + * The fact that this queue is being destroyed does not + * invalidate the fact that this queue may have been + * activated during the current burst. As a consequence, + * although the queue does not exist anymore, and hence + * needs to be removed from the burst list if there, + * the burst size has not to be decremented. + */ + hlist_del_init(&bfqq->burst_list_node); + + bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq); + + kmem_cache_free(bfq_pool, bfqq); +} + +static void bfq_put_cooperator(struct bfq_queue *bfqq) +{ + struct bfq_queue *__bfqq, *next; + + /* + * If this queue was scheduled to merge with another queue, be + * sure to drop the reference taken on that queue (and others in + * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. + */ + __bfqq = bfqq->new_bfqq; + while (__bfqq) { + if (__bfqq == bfqq) + break; + next = __bfqq->new_bfqq; + bfq_put_queue(__bfqq); + __bfqq = next; + } +} + +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + if (bfqq == bfqd->in_service_queue) { + __bfq_bfqq_expire(bfqd, bfqq); + bfq_schedule_dispatch(bfqd); + } + + bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, + atomic_read(&bfqq->ref)); + + bfq_put_cooperator(bfqq); + + bfq_put_queue(bfqq); +} + +static inline void bfq_init_icq(struct io_cq *icq) +{ + struct bfq_io_cq *bic = icq_to_bic(icq); + + bic->ttime.last_end_request = jiffies; + /* + * A newly created bic indicates that the process has just + * started doing I/O, and is probably mapping into memory its + * executable and libraries: it definitely needs weight raising. + * There is however the possibility that the process performs, + * for a while, I/O close to some other process. EQM intercepts + * this behavior and may merge the queue corresponding to the + * process with some other queue, BEFORE the weight of the queue + * is raised. Merged queues are not weight-raised (they are assumed + * to belong to processes that benefit only from high throughput). + * If the merge is basically the consequence of an accident, then + * the queue will be split soon and will get back its old weight. + * It is then important to write down somewhere that this queue + * does need weight raising, even if it did not make it to get its + * weight raised before being merged. To this purpose, we overload + * the field raising_time_left and assign 1 to it, to mark the queue + * as needing weight raising. + */ + bic->wr_time_left = 1; +} + +static void bfq_exit_icq(struct io_cq *icq) +{ + struct bfq_io_cq *bic = icq_to_bic(icq); + struct bfq_data *bfqd = bic_to_bfqd(bic); + + if (bic->bfqq[BLK_RW_ASYNC]) { + bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]); + bic->bfqq[BLK_RW_ASYNC] = NULL; + } + + if (bic->bfqq[BLK_RW_SYNC]) { + /* + * If the bic is using a shared queue, put the reference + * taken on the io_context when the bic started using a + * shared bfq_queue. 
+ */ + if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC])) + put_io_context(icq->ioc); + bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]); + bic->bfqq[BLK_RW_SYNC] = NULL; + } +} + +/* + * Update the entity prio values; note that the new values will not + * be used until the next (re)activation. + */ +static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +{ + struct task_struct *tsk = current; + int ioprio_class; + + ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); + switch (ioprio_class) { + default: + dev_err(bfqq->bfqd->queue->backing_dev_info.dev, + "bfq: bad prio class %d\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * No prio set, inherit CPU scheduling settings. + */ + bfqq->entity.new_ioprio = task_nice_ioprio(tsk); + bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk); + break; + case IOPRIO_CLASS_RT: + bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE; + bfqq->entity.new_ioprio = 7; + bfq_clear_bfqq_idle_window(bfqq); + break; + } + + if (bfqq->entity.new_ioprio < 0 || + bfqq->entity.new_ioprio >= IOPRIO_BE_NR) { + printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n", + bfqq->entity.new_ioprio); + BUG(); + } + + bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->entity.new_ioprio); + bfqq->entity.ioprio_changed = 1; +} + +static void bfq_check_ioprio_change(struct bfq_io_cq *bic) +{ + struct bfq_data *bfqd; + struct bfq_queue *bfqq, *new_bfqq; + struct bfq_group *bfqg; + unsigned long uninitialized_var(flags); + int ioprio = bic->icq.ioc->ioprio; + + bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), + &flags); + /* + * This condition may trigger on a newly created bic, be sure to + * drop the lock before returning. + */ + if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio)) + goto out; + + bic->ioprio = ioprio; + + bfqq = bic->bfqq[BLK_RW_ASYNC]; + if (bfqq != NULL) { + bfqg = container_of(bfqq->entity.sched_data, struct bfq_group, + sched_data); + new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic, + GFP_ATOMIC); + if (new_bfqq != NULL) { + bic->bfqq[BLK_RW_ASYNC] = new_bfqq; + bfq_log_bfqq(bfqd, bfqq, + "check_ioprio_change: bfqq %p %d", + bfqq, atomic_read(&bfqq->ref)); + bfq_put_queue(bfqq); + } + } + + bfqq = bic->bfqq[BLK_RW_SYNC]; + if (bfqq != NULL) + bfq_set_next_ioprio_data(bfqq, bic); + +out: + bfq_put_bfqd_unlock(bfqd, &flags); +} + +static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct bfq_io_cq *bic, pid_t pid, int is_sync) +{ + RB_CLEAR_NODE(&bfqq->entity.rb_node); + INIT_LIST_HEAD(&bfqq->fifo); + INIT_HLIST_NODE(&bfqq->burst_list_node); + + atomic_set(&bfqq->ref, 0); + bfqq->bfqd = bfqd; + + if (bic) + bfq_set_next_ioprio_data(bfqq, bic); + + if (is_sync) { + if (!bfq_class_idle(bfqq)) + bfq_mark_bfqq_idle_window(bfqq); + bfq_mark_bfqq_sync(bfqq); + } + bfq_mark_bfqq_IO_bound(bfqq); + + /* Tentative initial value to trade off between thr and lat */ + bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; + bfqq->pid = pid; + + bfqq->wr_coeff = 1; + bfqq->last_wr_start_finish = 0; + /* + * Set to the value for which bfqq will not be deemed as + * soft rt when it becomes backlogged. 
+ */ + bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies); +} + +static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, + int is_sync, + struct bfq_io_cq *bic, + gfp_t gfp_mask) +{ + struct bfq_queue *bfqq, *new_bfqq = NULL; + +retry: + /* bic always exists here */ + bfqq = bic_to_bfqq(bic, is_sync); + + /* + * Always try a new alloc if we fall back to the OOM bfqq + * originally, since it should just be a temporary situation. + */ + if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) { + bfqq = NULL; + if (new_bfqq != NULL) { + bfqq = new_bfqq; + new_bfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + spin_unlock_irq(bfqd->queue->queue_lock); + new_bfqq = kmem_cache_alloc_node(bfq_pool, + gfp_mask | __GFP_ZERO, + bfqd->queue->node); + spin_lock_irq(bfqd->queue->queue_lock); + if (new_bfqq != NULL) + goto retry; + } else { + bfqq = kmem_cache_alloc_node(bfq_pool, + gfp_mask | __GFP_ZERO, + bfqd->queue->node); + } + + if (bfqq != NULL) { + bfq_init_bfqq(bfqd, bfqq, bic, current->pid, + is_sync); + bfq_init_entity(&bfqq->entity, bfqg); + bfq_log_bfqq(bfqd, bfqq, "allocated"); + } else { + bfqq = &bfqd->oom_bfqq; + bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); + } + } + + if (new_bfqq != NULL) + kmem_cache_free(bfq_pool, new_bfqq); + + return bfqq; +} + +static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, + struct bfq_group *bfqg, + int ioprio_class, int ioprio) +{ + switch (ioprio_class) { + case IOPRIO_CLASS_RT: + return &bfqg->async_bfqq[0][ioprio]; + case IOPRIO_CLASS_NONE: + ioprio = IOPRIO_NORM; + /* fall through */ + case IOPRIO_CLASS_BE: + return &bfqg->async_bfqq[1][ioprio]; + case IOPRIO_CLASS_IDLE: + return &bfqg->async_idle_bfqq; + default: + BUG(); + } +} + +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, int is_sync, + struct bfq_io_cq *bic, gfp_t gfp_mask) +{ + const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); + const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); + struct bfq_queue **async_bfqq = NULL; + struct bfq_queue *bfqq = NULL; + + if (!is_sync) { + async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, + ioprio); + bfqq = *async_bfqq; + } + + if (bfqq == NULL) + bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask); + + /* + * Pin the queue now that it's allocated, scheduler exit will + * prune it. 
+ */ + if (!is_sync && *async_bfqq == NULL) { + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", + bfqq, atomic_read(&bfqq->ref)); + *async_bfqq = bfqq; + } + + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, + atomic_read(&bfqq->ref)); + return bfqq; +} + +static void bfq_update_io_thinktime(struct bfq_data *bfqd, + struct bfq_io_cq *bic) +{ + unsigned long elapsed = jiffies - bic->ttime.last_end_request; + unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle); + + bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; + bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8; + bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) / + bic->ttime.ttime_samples; +} + +static void bfq_update_io_seektime(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *rq) +{ + sector_t sdist; + u64 total; + + if (bfqq->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - bfqq->last_request_pos; + else + sdist = bfqq->last_request_pos - blk_rq_pos(rq); + + /* + * Don't allow the seek distance to get too large from the + * odd fragment, pagein, etc. + */ + if (bfqq->seek_samples == 0) /* first request, not really a seek */ + sdist = 0; + else if (bfqq->seek_samples <= 60) /* second & third seek */ + sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024); + else + sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64); + + bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8; + bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8; + total = bfqq->seek_total + (bfqq->seek_samples/2); + do_div(total, bfqq->seek_samples); + bfqq->seek_mean = (sector_t)total; + + bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist, + (u64)bfqq->seek_mean); +} + +/* + * Disable idle window if the process thinks too long or seeks so much that + * it doesn't matter. + */ +static void bfq_update_idle_window(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct bfq_io_cq *bic) +{ + int enable_idle; + + /* Don't idle for async or idle io prio class. */ + if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) + return; + + /* Idle window just restored, statistics are meaningless. */ + if (bfq_bfqq_just_split(bfqq)) + return; + + enable_idle = bfq_bfqq_idle_window(bfqq); + + if (atomic_read(&bic->icq.ioc->active_ref) == 0 || + bfqd->bfq_slice_idle == 0 || + (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && + bfqq->wr_coeff == 1)) + enable_idle = 0; + else if (bfq_sample_valid(bic->ttime.ttime_samples)) { + if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle && + bfqq->wr_coeff == 1) + enable_idle = 0; + else + enable_idle = 1; + } + bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", + enable_idle); + + if (enable_idle) + bfq_mark_bfqq_idle_window(bfqq); + else + bfq_clear_bfqq_idle_window(bfqq); +} + +/* + * Called when a new fs request (rq) is added to bfqq. Check if there's + * something we should do about it. 
+ */ +static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct request *rq) +{ + struct bfq_io_cq *bic = RQ_BIC(rq); + + if (rq->cmd_flags & REQ_META) + bfqq->meta_pending++; + + bfq_update_io_thinktime(bfqd, bic); + bfq_update_io_seektime(bfqd, bfqq, rq); + if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) { + bfq_clear_bfqq_constantly_seeky(bfqq); + if (!blk_queue_nonrot(bfqd->queue)) { + BUG_ON(!bfqd->const_seeky_busy_in_flight_queues); + bfqd->const_seeky_busy_in_flight_queues--; + } + } + if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || + !BFQQ_SEEKY(bfqq)) + bfq_update_idle_window(bfqd, bfqq, bic); + bfq_clear_bfqq_just_split(bfqq); + + bfq_log_bfqq(bfqd, bfqq, + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), + (long long unsigned)bfqq->seek_mean); + + bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + + if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { + int small_req = bfqq->queued[rq_is_sync(rq)] == 1 && + blk_rq_sectors(rq) < 32; + int budget_timeout = bfq_bfqq_budget_timeout(bfqq); + + /* + * There is just this request queued: if the request + * is small and the queue is not to be expired, then + * just exit. + * + * In this way, if the disk is being idled to wait for + * a new request from the in-service queue, we avoid + * unplugging the device and committing the disk to serve + * just a small request. On the contrary, we wait for + * the block layer to decide when to unplug the device: + * hopefully, new requests will be merged to this one + * quickly, then the device will be unplugged and + * larger requests will be dispatched. + */ + if (small_req && !budget_timeout) + return; + + /* + * A large enough request arrived, or the queue is to + * be expired: in both cases disk idling is to be + * stopped, so clear wait_request flag and reset + * timer. + */ + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + + /* + * The queue is not empty, because a new request just + * arrived. Hence we can safely expire the queue, in + * case of budget timeout, without risking that the + * timestamps of the queue are not updated correctly. + * See [1] for more details. + */ + if (budget_timeout) + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT); + + /* + * Let the request rip immediately, or let a new queue be + * selected if bfqq has just been expired. + */ + __blk_run_queue(bfqd->queue); + } +} + +static void bfq_insert_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; + + assert_spin_locked(bfqd->queue->queue_lock); + + /* + * An unplug may trigger a requeue of a request from the device + * driver: make sure we are in process context while trying to + * merge two bfq_queues. + */ + if (!in_interrupt()) { + new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); + if (new_bfqq != NULL) { + if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) + new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); + /* + * Release the request's reference to the old bfqq + * and make sure one is taken to the shared queue. 
+ */ + new_bfqq->allocated[rq_data_dir(rq)]++; + bfqq->allocated[rq_data_dir(rq)]--; + atomic_inc(&new_bfqq->ref); + bfq_put_queue(bfqq); + if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) + bfq_merge_bfqqs(bfqd, RQ_BIC(rq), + bfqq, new_bfqq); + rq->elv.priv[1] = new_bfqq; + bfqq = new_bfqq; + } else + bfq_bfqq_increase_failed_cooperations(bfqq); + } + + bfq_add_request(rq); + + /* + * Here a newly-created bfq_queue has already started a weight-raising + * period: clear raising_time_left to prevent bfq_bfqq_save_state() + * from assigning it a full weight-raising period. See the detailed + * comments about this field in bfq_init_icq(). + */ + if (bfqq->bic != NULL) + bfqq->bic->wr_time_left = 0; + rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; + list_add_tail(&rq->queuelist, &bfqq->fifo); + + bfq_rq_enqueued(bfqd, bfqq, rq); +} + +static void bfq_update_hw_tag(struct bfq_data *bfqd) +{ + bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver, + bfqd->rq_in_driver); + + if (bfqd->hw_tag == 1) + return; + + /* + * This sample is valid if the number of outstanding requests + * is large enough to allow a queueing behavior. Note that the + * sum is not exact, as it's not taking into account deactivated + * requests. + */ + if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) + return; + + if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) + return; + + bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; + bfqd->max_rq_in_driver = 0; + bfqd->hw_tag_samples = 0; +} + +static void bfq_completed_request(struct request_queue *q, struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + bool sync = bfq_bfqq_sync(bfqq); + + bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)", + blk_rq_sectors(rq), sync); + + bfq_update_hw_tag(bfqd); + + BUG_ON(!bfqd->rq_in_driver); + BUG_ON(!bfqq->dispatched); + bfqd->rq_in_driver--; + bfqq->dispatched--; + + if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { + bfq_weights_tree_remove(bfqd, &bfqq->entity, + &bfqd->queue_weights_tree); + if (!blk_queue_nonrot(bfqd->queue)) { + BUG_ON(!bfqd->busy_in_flight_queues); + bfqd->busy_in_flight_queues--; + if (bfq_bfqq_constantly_seeky(bfqq)) { + BUG_ON(!bfqd-> + const_seeky_busy_in_flight_queues); + bfqd->const_seeky_busy_in_flight_queues--; + } + } + } + + if (sync) { + bfqd->sync_flight--; + RQ_BIC(rq)->ttime.last_end_request = jiffies; + } + + /* + * If we are waiting to discover whether the request pattern of the + * task associated with the queue is actually isochronous, and + * both requisites for this condition to hold are satisfied, then + * compute soft_rt_next_start (see the comments to the function + * bfq_bfqq_softrt_next_start()). + */ + if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && + RB_EMPTY_ROOT(&bfqq->sort_list)) + bfqq->soft_rt_next_start = + bfq_bfqq_softrt_next_start(bfqd, bfqq); + + /* + * If this is the in-service queue, check if it needs to be expired, + * or if we want to idle in case it has no pending requests. 
+ */ + if (bfqd->in_service_queue == bfqq) { + if (bfq_bfqq_budget_new(bfqq)) + bfq_set_budget_timeout(bfqd); + + if (bfq_bfqq_must_idle(bfqq)) { + bfq_arm_slice_timer(bfqd); + goto out; + } else if (bfq_may_expire_for_budg_timeout(bfqq)) + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT); + else if (RB_EMPTY_ROOT(&bfqq->sort_list) && + (bfqq->dispatched == 0 || + !bfq_bfqq_must_not_expire(bfqq))) + bfq_bfqq_expire(bfqd, bfqq, 0, + BFQ_BFQQ_NO_MORE_REQUESTS); + } + + if (!bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); + +out: + return; +} + +static inline int __bfq_may_queue(struct bfq_queue *bfqq) +{ + if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { + bfq_clear_bfqq_must_alloc(bfqq); + return ELV_MQUEUE_MUST; + } + + return ELV_MQUEUE_MAY; +} + +static int bfq_may_queue(struct request_queue *q, int rw) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; + struct bfq_io_cq *bic; + struct bfq_queue *bfqq; + + /* + * Don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be + * queued. So just lookup a possibly existing queue, or return + * 'may queue' if that fails. + */ + bic = bfq_bic_lookup(bfqd, tsk->io_context); + if (bic == NULL) + return ELV_MQUEUE_MAY; + + bfqq = bic_to_bfqq(bic, rw_is_sync(rw)); + if (bfqq != NULL) + return __bfq_may_queue(bfqq); + + return ELV_MQUEUE_MAY; +} + +/* + * Queue lock held here. + */ +static void bfq_put_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + if (bfqq != NULL) { + const int rw = rq_data_dir(rq); + + BUG_ON(!bfqq->allocated[rw]); + bfqq->allocated[rw]--; + + rq->elv.priv[0] = NULL; + rq->elv.priv[1] = NULL; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", + bfqq, atomic_read(&bfqq->ref)); + bfq_put_queue(bfqq); + } +} + +/* + * Returns NULL if a new bfqq should be allocated, or the old bfqq if this + * was the last process referring to said bfqq. + */ +static struct bfq_queue * +bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) +{ + bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); + + put_io_context(bic->icq.ioc); + + if (bfqq_process_refs(bfqq) == 1) { + bfqq->pid = current->pid; + bfq_clear_bfqq_coop(bfqq); + bfq_clear_bfqq_split_coop(bfqq); + return bfqq; + } + + bic_set_bfqq(bic, NULL, 1); + + bfq_put_cooperator(bfqq); + + bfq_put_queue(bfqq); + return NULL; +} + +/* + * Allocate bfq data structures associated with this request. 
+ */ +static int bfq_set_request(struct request_queue *q, struct request *rq, + struct bio *bio, gfp_t gfp_mask) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); + const int rw = rq_data_dir(rq); + const int is_sync = rq_is_sync(rq); + struct bfq_queue *bfqq; + struct bfq_group *bfqg; + unsigned long flags; + bool split = false; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + bfq_check_ioprio_change(bic); + + spin_lock_irqsave(q->queue_lock, flags); + + if (bic == NULL) + goto queue_fail; + + bfqg = bfq_bic_update_cgroup(bic); + +new_queue: + bfqq = bic_to_bfqq(bic, is_sync); + if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) { + bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask); + bic_set_bfqq(bic, bfqq, is_sync); + if (split && is_sync) { + if ((bic->was_in_burst_list && bfqd->large_burst) || + bic->saved_in_large_burst) + bfq_mark_bfqq_in_large_burst(bfqq); + else { + bfq_clear_bfqq_in_large_burst(bfqq); + if (bic->was_in_burst_list) + hlist_add_head(&bfqq->burst_list_node, + &bfqd->burst_list); + } + } + } else { + /* If the queue was seeky for too long, break it apart. */ + if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { + bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); + bfqq = bfq_split_bfqq(bic, bfqq); + split = true; + if (!bfqq) + goto new_queue; + } + } + + bfqq->allocated[rw]++; + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, + atomic_read(&bfqq->ref)); + + rq->elv.priv[0] = bic; + rq->elv.priv[1] = bfqq; + + /* + * If a bfq_queue has only one process reference, it is owned + * by only one bfq_io_cq: we can set the bic field of the + * bfq_queue to the address of that structure. Also, if the + * queue has just been split, mark a flag so that the + * information is available to the other scheduler hooks. + */ + if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { + bfqq->bic = bic; + if (split) { + bfq_mark_bfqq_just_split(bfqq); + /* + * If the queue has just been split from a shared + * queue, restore the idle window and the possible + * weight raising period. + */ + bfq_bfqq_resume_state(bfqq, bic); + } + } + + spin_unlock_irqrestore(q->queue_lock, flags); + + return 0; + +queue_fail: + bfq_schedule_dispatch(bfqd); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 1; +} + +static void bfq_kick_queue(struct work_struct *work) +{ + struct bfq_data *bfqd = + container_of(work, struct bfq_data, unplug_work); + struct request_queue *q = bfqd->queue; + + spin_lock_irq(q->queue_lock); + __blk_run_queue(q); + spin_unlock_irq(q->queue_lock); +} + +/* + * Handler of the expiration of the timer running if the in-service queue + * is idling inside its time slice. + */ +static void bfq_idle_slice_timer(unsigned long data) +{ + struct bfq_data *bfqd = (struct bfq_data *)data; + struct bfq_queue *bfqq; + unsigned long flags; + enum bfqq_expiration reason; + + spin_lock_irqsave(bfqd->queue->queue_lock, flags); + + bfqq = bfqd->in_service_queue; + /* + * Theoretical race here: the in-service queue can be NULL or + * different from the queue that was idling if the timer handler + * spins on the queue_lock and a new request arrives for the + * current queue and there is a full dispatch cycle that changes + * the in-service queue. This can hardly happen, but in the worst + * case we just expire a queue too early. 
+ */ + if (bfqq != NULL) { + bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); + if (bfq_bfqq_budget_timeout(bfqq)) + /* + * Also here the queue can be safely expired + * for budget timeout without wasting + * guarantees + */ + reason = BFQ_BFQQ_BUDGET_TIMEOUT; + else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) + /* + * The queue may not be empty upon timer expiration, + * because we may not disable the timer when the + * first request of the in-service queue arrives + * during disk idling. + */ + reason = BFQ_BFQQ_TOO_IDLE; + else + goto schedule_dispatch; + + bfq_bfqq_expire(bfqd, bfqq, 1, reason); + } + +schedule_dispatch: + bfq_schedule_dispatch(bfqd); + + spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); +} + +static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) +{ + del_timer_sync(&bfqd->idle_slice_timer); + cancel_work_sync(&bfqd->unplug_work); +} + +static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd, + struct bfq_queue **bfqq_ptr) +{ + struct bfq_group *root_group = bfqd->root_group; + struct bfq_queue *bfqq = *bfqq_ptr; + + bfq_log(bfqd, "put_async_bfqq: %p", bfqq); + if (bfqq != NULL) { + bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group); + bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", + bfqq, atomic_read(&bfqq->ref)); + bfq_put_queue(bfqq); + *bfqq_ptr = NULL; + } +} + +/* + * Release all the bfqg references to its async queues. If we are + * deallocating the group these queues may still contain requests, so + * we reparent them to the root cgroup (i.e., the only one that will + * exist for sure until all the requests on a device are gone). + */ +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) +{ + int i, j; + + for (i = 0; i < 2; i++) + for (j = 0; j < IOPRIO_BE_NR; j++) + __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); + + __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); +} + +static void bfq_exit_queue(struct elevator_queue *e) +{ + struct bfq_data *bfqd = e->elevator_data; + struct request_queue *q = bfqd->queue; + struct bfq_queue *bfqq, *n; + + bfq_shutdown_timer_wq(bfqd); + + spin_lock_irq(q->queue_lock); + + BUG_ON(bfqd->in_service_queue != NULL); + list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) + bfq_deactivate_bfqq(bfqd, bfqq, 0); + + bfq_disconnect_groups(bfqd); + spin_unlock_irq(q->queue_lock); + + bfq_shutdown_timer_wq(bfqd); + + synchronize_rcu(); + + BUG_ON(timer_pending(&bfqd->idle_slice_timer)); + + bfq_free_root_group(bfqd); + kfree(bfqd); +} + +static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct bfq_group *bfqg; + struct bfq_data *bfqd; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (eq == NULL) + return -ENOMEM; + + bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); + if (bfqd == NULL) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = bfqd; + + /* + * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. + * Grab a permanent reference to it, so that the normal code flow + * will not attempt to free it. + */ + bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); + atomic_inc(&bfqd->oom_bfqq.ref); + bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; + bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE; + bfqd->oom_bfqq.entity.new_weight = + bfq_ioprio_to_weight(bfqd->oom_bfqq.entity.new_ioprio); + /* + * Trigger weight initialization, according to ioprio, at the + * oom_bfqq's first activation. 
The oom_bfqq's ioprio and ioprio + * class won't be changed any more. + */ + bfqd->oom_bfqq.entity.ioprio_changed = 1; + + bfqd->queue = q; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + bfqg = bfq_alloc_root_group(bfqd, q->node); + if (bfqg == NULL) { + kfree(bfqd); + kobject_put(&eq->kobj); + return -ENOMEM; + } + + bfqd->root_group = bfqg; + bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); +#ifdef CONFIG_CGROUP_BFQIO + bfqd->active_numerous_groups = 0; +#endif + + init_timer(&bfqd->idle_slice_timer); + bfqd->idle_slice_timer.function = bfq_idle_slice_timer; + bfqd->idle_slice_timer.data = (unsigned long)bfqd; + + bfqd->rq_pos_tree = RB_ROOT; + bfqd->queue_weights_tree = RB_ROOT; + bfqd->group_weights_tree = RB_ROOT; + + INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); + + INIT_LIST_HEAD(&bfqd->active_list); + INIT_LIST_HEAD(&bfqd->idle_list); + INIT_HLIST_HEAD(&bfqd->burst_list); + + bfqd->hw_tag = -1; + + bfqd->bfq_max_budget = bfq_default_max_budget; + + bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; + bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; + bfqd->bfq_back_max = bfq_back_max; + bfqd->bfq_back_penalty = bfq_back_penalty; + bfqd->bfq_slice_idle = bfq_slice_idle; + bfqd->bfq_class_idle_last_service = 0; + bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq; + bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; + bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; + + bfqd->bfq_coop_thresh = 2; + bfqd->bfq_failed_cooperations = 7000; + bfqd->bfq_requests_within_timer = 120; + + bfqd->bfq_large_burst_thresh = 11; + bfqd->bfq_burst_interval = msecs_to_jiffies(500); + + bfqd->low_latency = true; + + bfqd->bfq_wr_coeff = 20; + bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); + bfqd->bfq_wr_max_time = 0; + bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); + bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); + bfqd->bfq_wr_max_softrt_rate = 7000; /* + * Approximate rate required + * to playback or record a + * high-definition compressed + * video. + */ + bfqd->wr_busy_queues = 0; + bfqd->busy_in_flight_queues = 0; + bfqd->const_seeky_busy_in_flight_queues = 0; + + /* + * Begin by assuming, optimistically, that the device peak rate is + * equal to the highest reference rate. + */ + bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] * + T_fast[blk_queue_nonrot(bfqd->queue)]; + bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)]; + bfqd->device_speed = BFQ_BFQD_FAST; + + return 0; +} + +static void bfq_slab_kill(void) +{ + if (bfq_pool != NULL) + kmem_cache_destroy(bfq_pool); +} + +static int __init bfq_slab_setup(void) +{ + bfq_pool = KMEM_CACHE(bfq_queue, 0); + if (bfq_pool == NULL) + return -ENOMEM; + return 0; +} + +static ssize_t bfq_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t bfq_var_store(unsigned long *var, const char *page, + size_t count) +{ + unsigned long new_val; + int ret = kstrtoul(page, 10, &new_val); + + if (ret == 0) + *var = new_val; + + return count; +} + +static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) +{ + struct bfq_data *bfqd = e->elevator_data; + return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? 
+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : + jiffies_to_msecs(bfq_wr_duration(bfqd))); +} + +static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) +{ + struct bfq_queue *bfqq; + struct bfq_data *bfqd = e->elevator_data; + ssize_t num_char = 0; + + num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", + bfqd->queued); + + spin_lock_irq(bfqd->queue->queue_lock); + + num_char += sprintf(page + num_char, "Active:\n"); + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { + num_char += sprintf(page + num_char, + "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n", + bfqq->pid, + bfqq->entity.weight, + bfqq->queued[0], + bfqq->queued[1], + jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), + jiffies_to_msecs(bfqq->wr_cur_max_time)); + } + + num_char += sprintf(page + num_char, "Idle:\n"); + list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { + num_char += sprintf(page + num_char, + "pid%d: weight %hu, dur %d/%u\n", + bfqq->pid, + bfqq->entity.weight, + jiffies_to_msecs(jiffies - + bfqq->last_wr_start_finish), + jiffies_to_msecs(bfqq->wr_cur_max_time)); + } + + spin_unlock_irq(bfqd->queue->queue_lock); + + return num_char; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ + unsigned int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return bfq_var_show(__data, (page)); \ +} +SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1); +SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1); +SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); +SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); +SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1); +SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); +SHOW_FUNCTION(bfq_max_budget_async_rq_show, + bfqd->bfq_max_budget_async_rq, 0); +SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1); +SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1); +SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); +SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); +SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); +SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1); +SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, + 1); +SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t \ +__FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ + unsigned long uninitialized_var(__data); \ + int ret = bfq_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, + INT_MAX, 1); +STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, + INT_MAX, 1); +STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); +STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, + INT_MAX, 0); +STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1); +STORE_FUNCTION(bfq_max_budget_async_rq_store, 
&bfqd->bfq_max_budget_async_rq, + 1, INT_MAX, 0); +STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0, + INT_MAX, 1); +STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); +STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); +STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, + 1); +STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0, + INT_MAX, 1); +STORE_FUNCTION(bfq_wr_min_inter_arr_async_store, + &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1); +STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, + INT_MAX, 0); +#undef STORE_FUNCTION + +/* do nothing for the moment */ +static ssize_t bfq_weights_store(struct elevator_queue *e, + const char *page, size_t count) +{ + return count; +} + +static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd) +{ + u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); + + if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES) + return bfq_calc_max_budget(bfqd->peak_rate, timeout); + else + return bfq_default_max_budget; +} + +static ssize_t bfq_max_budget_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned long uninitialized_var(__data); + int ret = bfq_var_store(&__data, (page), count); + + if (__data == 0) + bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); + else { + if (__data > INT_MAX) + __data = INT_MAX; + bfqd->bfq_max_budget = __data; + } + + bfqd->bfq_user_max_budget = __data; + + return ret; +} + +static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned long uninitialized_var(__data); + int ret = bfq_var_store(&__data, (page), count); + + if (__data < 1) + __data = 1; + else if (__data > INT_MAX) + __data = INT_MAX; + + bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data); + if (bfqd->bfq_user_max_budget == 0) + bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); + + return ret; +} + +static ssize_t bfq_low_latency_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned long uninitialized_var(__data); + int ret = bfq_var_store(&__data, (page), count); + + if (__data > 1) + __data = 1; + if (__data == 0 && bfqd->low_latency != 0) + bfq_end_wr(bfqd); + bfqd->low_latency = __data; + + return ret; +} + +#define BFQ_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) + +static struct elv_fs_entry bfq_attrs[] = { + BFQ_ATTR(fifo_expire_sync), + BFQ_ATTR(fifo_expire_async), + BFQ_ATTR(back_seek_max), + BFQ_ATTR(back_seek_penalty), + BFQ_ATTR(slice_idle), + BFQ_ATTR(max_budget), + BFQ_ATTR(max_budget_async_rq), + BFQ_ATTR(timeout_sync), + BFQ_ATTR(timeout_async), + BFQ_ATTR(low_latency), + BFQ_ATTR(wr_coeff), + BFQ_ATTR(wr_max_time), + BFQ_ATTR(wr_rt_max_time), + BFQ_ATTR(wr_min_idle_time), + BFQ_ATTR(wr_min_inter_arr_async), + BFQ_ATTR(wr_max_softrt_rate), + BFQ_ATTR(weights), + __ATTR_NULL +}; + +static struct elevator_type iosched_bfq = { + .ops = { + .elevator_merge_fn = bfq_merge, + .elevator_merged_fn = bfq_merged_request, + .elevator_merge_req_fn = bfq_merged_requests, + .elevator_allow_merge_fn = bfq_allow_merge, + .elevator_dispatch_fn = bfq_dispatch_requests, + .elevator_add_req_fn = bfq_insert_request, + .elevator_activate_req_fn = bfq_activate_request, + .elevator_deactivate_req_fn = 
bfq_deactivate_request, + .elevator_completed_req_fn = bfq_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_icq_fn = bfq_init_icq, + .elevator_exit_icq_fn = bfq_exit_icq, + .elevator_set_req_fn = bfq_set_request, + .elevator_put_req_fn = bfq_put_request, + .elevator_may_queue_fn = bfq_may_queue, + .elevator_init_fn = bfq_init_queue, + .elevator_exit_fn = bfq_exit_queue, + }, + .icq_size = sizeof(struct bfq_io_cq), + .icq_align = __alignof__(struct bfq_io_cq), + .elevator_attrs = bfq_attrs, + .elevator_name = "bfq", + .elevator_owner = THIS_MODULE, +}; + +static int __init bfq_init(void) +{ + /* + * Can be 0 on HZ < 1000 setups. + */ + if (bfq_slice_idle == 0) + bfq_slice_idle = 1; + + if (bfq_timeout_async == 0) + bfq_timeout_async = 1; + + if (bfq_slab_setup()) + return -ENOMEM; + + /* + * Times to load large popular applications for the typical systems + * installed on the reference devices (see the comments before the + * definitions of the two arrays). + */ + T_slow[0] = msecs_to_jiffies(2600); + T_slow[1] = msecs_to_jiffies(1000); + T_fast[0] = msecs_to_jiffies(5500); + T_fast[1] = msecs_to_jiffies(2000); + + /* + * Thresholds that determine the switch between speed classes (see + * the comments before the definition of the array). + */ + device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2; + device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2; + + elv_register(&iosched_bfq); + pr_info("BFQ I/O-scheduler: v7r8"); + + return 0; +} + +static void __exit bfq_exit(void) +{ + elv_unregister(&iosched_bfq); + bfq_slab_kill(); +} + +module_init(bfq_init); +module_exit(bfq_exit); + +MODULE_AUTHOR("Fabio Checconi, Paolo Valente"); +MODULE_LICENSE("GPL"); diff --git a/block/bfq-sched.c b/block/bfq-sched.c new file mode 100644 index 000000000..d0890c6d4 --- /dev/null +++ b/block/bfq-sched.c @@ -0,0 +1,1180 @@ +/* + * BFQ: Hierarchical B-WF2Q+ scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2010 Paolo Valente + */ + +#ifdef CONFIG_CGROUP_BFQIO +#define for_each_entity(entity) \ + for (; entity != NULL; entity = entity->parent) + +#define for_each_entity_safe(entity, parent) \ + for (; entity && ({ parent = entity->parent; 1; }); entity = parent) + +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + int extract, + struct bfq_data *bfqd); + +static inline void bfq_update_budget(struct bfq_entity *next_in_service) +{ + struct bfq_entity *bfqg_entity; + struct bfq_group *bfqg; + struct bfq_sched_data *group_sd; + + BUG_ON(next_in_service == NULL); + + group_sd = next_in_service->sched_data; + + bfqg = container_of(group_sd, struct bfq_group, sched_data); + /* + * bfq_group's my_entity field is not NULL only if the group + * is not the root group. We must not touch the root entity + * as it must never become an in-service entity. 
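+ * For any other group, the budget of the group's own entity (the one + * through which the group competes at the upper level) is kept equal to + * the budget of the entity that will be served next inside the group, so + * that upper-level scheduling decisions see an up-to-date value.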
+ */ + bfqg_entity = bfqg->my_entity; + if (bfqg_entity != NULL) + bfqg_entity->budget = next_in_service->budget; +} + +static int bfq_update_next_in_service(struct bfq_sched_data *sd) +{ + struct bfq_entity *next_in_service; + + if (sd->in_service_entity != NULL) + /* will update/requeue at the end of service */ + return 0; + + /* + * NOTE: this can be improved in many ways, such as returning + * 1 (and thus propagating upwards the update) only when the + * budget changes, or caching the bfqq that will be scheduled + * next from this subtree. By now we worry more about + * correctness than about performance... + */ + next_in_service = bfq_lookup_next_entity(sd, 0, NULL); + sd->next_in_service = next_in_service; + + if (next_in_service != NULL) + bfq_update_budget(next_in_service); + + return 1; +} + +static inline void bfq_check_next_in_service(struct bfq_sched_data *sd, + struct bfq_entity *entity) +{ + BUG_ON(sd->next_in_service != entity); +} +#else +#define for_each_entity(entity) \ + for (; entity != NULL; entity = NULL) + +#define for_each_entity_safe(entity, parent) \ + for (parent = NULL; entity != NULL; entity = parent) + +static inline int bfq_update_next_in_service(struct bfq_sched_data *sd) +{ + return 0; +} + +static inline void bfq_check_next_in_service(struct bfq_sched_data *sd, + struct bfq_entity *entity) +{ +} + +static inline void bfq_update_budget(struct bfq_entity *next_in_service) +{ +} +#endif + +/* + * Shift for timestamp calculations. This actually limits the maximum + * service allowed in one timestamp delta (small shift values increase it), + * the maximum total weight that can be used for the queues in the system + * (big shift values increase it), and the period of virtual time + * wraparounds. + */ +#define WFQ_SERVICE_SHIFT 22 + +/** + * bfq_gt - compare two timestamps. + * @a: first ts. + * @b: second ts. + * + * Return @a > @b, dealing with wrapping correctly. + */ +static inline int bfq_gt(u64 a, u64 b) +{ + return (s64)(a - b) > 0; +} + +static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = NULL; + + BUG_ON(entity == NULL); + + if (entity->my_sched_data == NULL) + bfqq = container_of(entity, struct bfq_queue, entity); + + return bfqq; +} + + +/** + * bfq_delta - map service into the virtual time domain. + * @service: amount of service. + * @weight: scale factor (weight of an entity or weight sum). + */ +static inline u64 bfq_delta(unsigned long service, + unsigned long weight) +{ + u64 d = (u64)service << WFQ_SERVICE_SHIFT; + + do_div(d, weight); + return d; +} + +/** + * bfq_calc_finish - assign the finish time to an entity. + * @entity: the entity to act upon. + * @service: the service to be charged to the entity. + */ +static inline void bfq_calc_finish(struct bfq_entity *entity, + unsigned long service) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + BUG_ON(entity->weight == 0); + + entity->finish = entity->start + + bfq_delta(service, entity->weight); + + if (bfqq != NULL) { + bfq_log_bfqq(bfqq->bfqd, bfqq, + "calc_finish: serv %lu, w %d", + service, entity->weight); + bfq_log_bfqq(bfqq->bfqd, bfqq, + "calc_finish: start %llu, finish %llu, delta %llu", + entity->start, entity->finish, + bfq_delta(service, entity->weight)); + } +} + +/** + * bfq_entity_of - get an entity from a node. + * @node: the node field of the entity. + * + * Convert a node pointer to the relative entity. 
This is used only + * to simplify the logic of some functions and not as the generic + * conversion mechanism because, e.g., in the tree walking functions, + * the check for a %NULL value would be redundant. + */ +static inline struct bfq_entity *bfq_entity_of(struct rb_node *node) +{ + struct bfq_entity *entity = NULL; + + if (node != NULL) + entity = rb_entry(node, struct bfq_entity, rb_node); + + return entity; +} + +/** + * bfq_extract - remove an entity from a tree. + * @root: the tree root. + * @entity: the entity to remove. + */ +static inline void bfq_extract(struct rb_root *root, + struct bfq_entity *entity) +{ + BUG_ON(entity->tree != root); + + entity->tree = NULL; + rb_erase(&entity->rb_node, root); +} + +/** + * bfq_idle_extract - extract an entity from the idle tree. + * @st: the service tree of the owning @entity. + * @entity: the entity being removed. + */ +static void bfq_idle_extract(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *next; + + BUG_ON(entity->tree != &st->idle); + + if (entity == st->first_idle) { + next = rb_next(&entity->rb_node); + st->first_idle = bfq_entity_of(next); + } + + if (entity == st->last_idle) { + next = rb_prev(&entity->rb_node); + st->last_idle = bfq_entity_of(next); + } + + bfq_extract(&st->idle, entity); + + if (bfqq != NULL) + list_del(&bfqq->bfqq_list); +} + +/** + * bfq_insert - generic tree insertion. + * @root: tree root. + * @entity: entity to insert. + * + * This is used for the idle and the active tree, since they are both + * ordered by finish time. + */ +static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) +{ + struct bfq_entity *entry; + struct rb_node **node = &root->rb_node; + struct rb_node *parent = NULL; + + BUG_ON(entity->tree != NULL); + + while (*node != NULL) { + parent = *node; + entry = rb_entry(parent, struct bfq_entity, rb_node); + + if (bfq_gt(entry->finish, entity->finish)) + node = &parent->rb_left; + else + node = &parent->rb_right; + } + + rb_link_node(&entity->rb_node, parent, node); + rb_insert_color(&entity->rb_node, root); + + entity->tree = root; +} + +/** + * bfq_update_min - update the min_start field of a entity. + * @entity: the entity to update. + * @node: one of its children. + * + * This function is called when @entity may store an invalid value for + * min_start due to updates to the active tree. The function assumes + * that the subtree rooted at @node (which may be its left or its right + * child) has a valid min_start value. + */ +static inline void bfq_update_min(struct bfq_entity *entity, + struct rb_node *node) +{ + struct bfq_entity *child; + + if (node != NULL) { + child = rb_entry(node, struct bfq_entity, rb_node); + if (bfq_gt(entity->min_start, child->min_start)) + entity->min_start = child->min_start; + } +} + +/** + * bfq_update_active_node - recalculate min_start. + * @node: the node to update. + * + * @node may have changed position or one of its children may have moved, + * this function updates its min_start value. The left and right subtrees + * are assumed to hold a correct min_start value. + */ +static inline void bfq_update_active_node(struct rb_node *node) +{ + struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); + + entity->min_start = entity->start; + bfq_update_min(entity, node->rb_right); + bfq_update_min(entity, node->rb_left); +} + +/** + * bfq_update_active_tree - update min_start for the whole active tree. + * @node: the starting node. 
+ * + * @node must be the deepest modified node after an update. This function + * updates its min_start using the values held by its children, assuming + * that they did not change, and then updates all the nodes that may have + * changed in the path to the root. The only nodes that may have changed + * are the ones in the path or their siblings. + */ +static void bfq_update_active_tree(struct rb_node *node) +{ + struct rb_node *parent; + +up: + bfq_update_active_node(node); + + parent = rb_parent(node); + if (parent == NULL) + return; + + if (node == parent->rb_left && parent->rb_right != NULL) + bfq_update_active_node(parent->rb_right); + else if (parent->rb_left != NULL) + bfq_update_active_node(parent->rb_left); + + node = parent; + goto up; +} + +static void bfq_weights_tree_add(struct bfq_data *bfqd, + struct bfq_entity *entity, + struct rb_root *root); + +static void bfq_weights_tree_remove(struct bfq_data *bfqd, + struct bfq_entity *entity, + struct rb_root *root); + + +/** + * bfq_active_insert - insert an entity in the active tree of its + * group/device. + * @st: the service tree of the entity. + * @entity: the entity being inserted. + * + * The active tree is ordered by finish time, but an extra key is kept + * per each node, containing the minimum value for the start times of + * its children (and the node itself), so it's possible to search for + * the eligible node with the lowest finish time in logarithmic time. + */ +static void bfq_active_insert(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *node = &entity->rb_node; +#ifdef CONFIG_CGROUP_BFQIO + struct bfq_sched_data *sd = NULL; + struct bfq_group *bfqg = NULL; + struct bfq_data *bfqd = NULL; +#endif + + bfq_insert(&st->active, entity); + + if (node->rb_left != NULL) + node = node->rb_left; + else if (node->rb_right != NULL) + node = node->rb_right; + + bfq_update_active_tree(node); + +#ifdef CONFIG_CGROUP_BFQIO + sd = entity->sched_data; + bfqg = container_of(sd, struct bfq_group, sched_data); + BUG_ON(!bfqg); + bfqd = (struct bfq_data *)bfqg->bfqd; +#endif + if (bfqq != NULL) + list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); +#ifdef CONFIG_CGROUP_BFQIO + else { /* bfq_group */ + BUG_ON(!bfqd); + bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree); + } + if (bfqg != bfqd->root_group) { + BUG_ON(!bfqg); + BUG_ON(!bfqd); + bfqg->active_entities++; + if (bfqg->active_entities == 2) + bfqd->active_numerous_groups++; + } +#endif +} + +/** + * bfq_ioprio_to_weight - calc a weight from an ioprio. + * @ioprio: the ioprio value to convert. + */ +static inline unsigned short bfq_ioprio_to_weight(int ioprio) +{ + BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); + return IOPRIO_BE_NR - ioprio; +} + +/** + * bfq_weight_to_ioprio - calc an ioprio from a weight. + * @weight: the weight value to convert. + * + * To preserve as much as possible the old only-ioprio user interface, + * 0 is used as an escape ioprio value for weights (numerically) equal or + * larger than IOPRIO_BE_NR. + */ +static inline unsigned short bfq_weight_to_ioprio(int weight) +{ + BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); + return IOPRIO_BE_NR - weight < 0 ?
0 : IOPRIO_BE_NR - weight; +} + +static inline void bfq_get_entity(struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + if (bfqq != NULL) { + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", + bfqq, atomic_read(&bfqq->ref)); + } +} + +/** + * bfq_find_deepest - find the deepest node that an extraction can modify. + * @node: the node being removed. + * + * Do the first step of an extraction in an rb tree, looking for the + * node that will replace @node, and returning the deepest node that + * the following modifications to the tree can touch. If @node is the + * last node in the tree return %NULL. + */ +static struct rb_node *bfq_find_deepest(struct rb_node *node) +{ + struct rb_node *deepest; + + if (node->rb_right == NULL && node->rb_left == NULL) + deepest = rb_parent(node); + else if (node->rb_right == NULL) + deepest = node->rb_left; + else if (node->rb_left == NULL) + deepest = node->rb_right; + else { + deepest = rb_next(node); + if (deepest->rb_right != NULL) + deepest = deepest->rb_right; + else if (rb_parent(deepest) != node) + deepest = rb_parent(deepest); + } + + return deepest; +} + +/** + * bfq_active_extract - remove an entity from the active tree. + * @st: the service_tree containing the tree. + * @entity: the entity being removed. + */ +static void bfq_active_extract(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *node; +#ifdef CONFIG_CGROUP_BFQIO + struct bfq_sched_data *sd = NULL; + struct bfq_group *bfqg = NULL; + struct bfq_data *bfqd = NULL; +#endif + + node = bfq_find_deepest(&entity->rb_node); + bfq_extract(&st->active, entity); + + if (node != NULL) + bfq_update_active_tree(node); + +#ifdef CONFIG_CGROUP_BFQIO + sd = entity->sched_data; + bfqg = container_of(sd, struct bfq_group, sched_data); + BUG_ON(!bfqg); + bfqd = (struct bfq_data *)bfqg->bfqd; +#endif + if (bfqq != NULL) + list_del(&bfqq->bfqq_list); +#ifdef CONFIG_CGROUP_BFQIO + else { /* bfq_group */ + BUG_ON(!bfqd); + bfq_weights_tree_remove(bfqd, entity, + &bfqd->group_weights_tree); + } + if (bfqg != bfqd->root_group) { + BUG_ON(!bfqg); + BUG_ON(!bfqd); + BUG_ON(!bfqg->active_entities); + bfqg->active_entities--; + if (bfqg->active_entities == 1) { + BUG_ON(!bfqd->active_numerous_groups); + bfqd->active_numerous_groups--; + } + } +#endif +} + +/** + * bfq_idle_insert - insert an entity into the idle tree. + * @st: the service tree containing the tree. + * @entity: the entity to insert. + */ +static void bfq_idle_insert(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_entity *first_idle = st->first_idle; + struct bfq_entity *last_idle = st->last_idle; + + if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish)) + st->first_idle = entity; + if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish)) + st->last_idle = entity; + + bfq_insert(&st->idle, entity); + + if (bfqq != NULL) + list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); +} + +/** + * bfq_forget_entity - remove an entity from the wfq trees. + * @st: the service tree. + * @entity: the entity being removed. + * + * Update the device status and forget everything about @entity, putting + * the device reference to it, if it is a queue. Entities belonging to + * groups are not refcounted. 
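+ * The entity is assumed to have already been extracted from the active + * or the idle tree of its service tree by the caller.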
+ */ +static void bfq_forget_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_sched_data *sd; + + BUG_ON(!entity->on_st); + + entity->on_st = 0; + st->wsum -= entity->weight; + if (bfqq != NULL) { + sd = entity->sched_data; + bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", + bfqq, atomic_read(&bfqq->ref)); + bfq_put_queue(bfqq); + } +} + +/** + * bfq_put_idle_entity - release the idle tree ref of an entity. + * @st: service tree for the entity. + * @entity: the entity being released. + */ +static void bfq_put_idle_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + bfq_idle_extract(st, entity); + bfq_forget_entity(st, entity); +} + +/** + * bfq_forget_idle - update the idle tree if necessary. + * @st: the service tree to act upon. + * + * To preserve the global O(log N) complexity we only remove one entry here; + * as the idle tree will not grow indefinitely this can be done safely. + */ +static void bfq_forget_idle(struct bfq_service_tree *st) +{ + struct bfq_entity *first_idle = st->first_idle; + struct bfq_entity *last_idle = st->last_idle; + + if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL && + !bfq_gt(last_idle->finish, st->vtime)) { + /* + * Forget the whole idle tree, increasing the vtime past + * the last finish time of idle entities. + */ + st->vtime = last_idle->finish; + } + + if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime)) + bfq_put_idle_entity(st, first_idle); +} + +static struct bfq_service_tree * +__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, + struct bfq_entity *entity) +{ + struct bfq_service_tree *new_st = old_st; + + if (entity->ioprio_changed) { + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + unsigned short prev_weight, new_weight; + struct bfq_data *bfqd = NULL; + struct rb_root *root; +#ifdef CONFIG_CGROUP_BFQIO + struct bfq_sched_data *sd; + struct bfq_group *bfqg; +#endif + + if (bfqq != NULL) + bfqd = bfqq->bfqd; +#ifdef CONFIG_CGROUP_BFQIO + else { + sd = entity->my_sched_data; + bfqg = container_of(sd, struct bfq_group, sched_data); + BUG_ON(!bfqg); + bfqd = (struct bfq_data *)bfqg->bfqd; + BUG_ON(!bfqd); + } +#endif + + BUG_ON(old_st->wsum < entity->weight); + old_st->wsum -= entity->weight; + + if (entity->new_weight != entity->orig_weight) { + if (entity->new_weight < BFQ_MIN_WEIGHT || + entity->new_weight > BFQ_MAX_WEIGHT) { + printk(KERN_CRIT "update_weight_prio: " + "new_weight %d\n", + entity->new_weight); + BUG(); + } + entity->orig_weight = entity->new_weight; + entity->ioprio = + bfq_weight_to_ioprio(entity->orig_weight); + } + + entity->ioprio_class = entity->new_ioprio_class; + entity->ioprio_changed = 0; + + /* + * NOTE: here we may be changing the weight too early, + * this will cause unfairness. The correct approach + * would have required additional complexity to defer + * weight changes to the proper time instants (i.e., + * when entity->finish <= old_st->vtime). + */ + new_st = bfq_entity_service_tree(entity); + + prev_weight = entity->weight; + new_weight = entity->orig_weight * + (bfqq != NULL ? bfqq->wr_coeff : 1); + /* + * If the weight of the entity changes, remove the entity + * from its old weight counter (if there is a counter + * associated with the entity), and add it to the counter + * associated with its new weight. + */ + if (prev_weight != new_weight) { + root = bfqq ? 
&bfqd->queue_weights_tree : + &bfqd->group_weights_tree; + bfq_weights_tree_remove(bfqd, entity, root); + } + entity->weight = new_weight; + /* + * Add the entity to its weights tree only if it is + * not associated with a weight-raised queue. + */ + if (prev_weight != new_weight && + (bfqq ? bfqq->wr_coeff == 1 : 1)) + /* If we get here, root has been initialized. */ + bfq_weights_tree_add(bfqd, entity, root); + + new_st->wsum += entity->weight; + + if (new_st != old_st) + entity->start = new_st->vtime; + } + + return new_st; +} + +/** + * bfq_bfqq_served - update the scheduler status after selection for + * service. + * @bfqq: the queue being served. + * @served: bytes to transfer. + * + * NOTE: this can be optimized, as the timestamps of upper level entities + * are synchronized every time a new bfqq is selected for service. By now, + * we keep it to better check consistency. + */ +static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served) +{ + struct bfq_entity *entity = &bfqq->entity; + struct bfq_service_tree *st; + + for_each_entity(entity) { + st = bfq_entity_service_tree(entity); + + entity->service += served; + BUG_ON(entity->service > entity->budget); + BUG_ON(st->wsum == 0); + + st->vtime += bfq_delta(served, st->wsum); + bfq_forget_idle(st); + } + bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served); +} + +/** + * bfq_bfqq_charge_full_budget - set the service to the entity budget. + * @bfqq: the queue that needs a service update. + * + * When it's not possible to be fair in the service domain, because + * a queue is not consuming its budget fast enough (the meaning of + * fast depends on the timeout parameter), we charge it a full + * budget. In this way we should obtain a sort of time-domain + * fairness among all the seeky/slow queues. + */ +static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); + + bfq_bfqq_served(bfqq, entity->budget - entity->service); +} + +/** + * __bfq_activate_entity - activate an entity. + * @entity: the entity being activated. + * + * Called whenever an entity is activated, i.e., it is not active and one + * of its children receives a new request, or has to be reactivated due to + * budget exhaustion. It uses the current budget of the entity (and the + * service received if @entity is active) of the queue to calculate its + * timestamps. + */ +static void __bfq_activate_entity(struct bfq_entity *entity) +{ + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + + if (entity == sd->in_service_entity) { + BUG_ON(entity->tree != NULL); + /* + * If we are requeueing the current entity we have + * to take care of not charging to it service it has + * not received. + */ + bfq_calc_finish(entity, entity->service); + entity->start = entity->finish; + sd->in_service_entity = NULL; + } else if (entity->tree == &st->active) { + /* + * Requeueing an entity due to a change of some + * next_in_service entity below it. We reuse the + * old start time. + */ + bfq_active_extract(st, entity); + } else if (entity->tree == &st->idle) { + /* + * Must be on the idle tree, bfq_idle_extract() will + * check for that. + */ + bfq_idle_extract(st, entity); + entity->start = bfq_gt(st->vtime, entity->finish) ? 
+ st->vtime : entity->finish; + } else { + /* + * The finish time of the entity may be invalid, and + * it is in the past for sure, otherwise the queue + * would have been on the idle tree. + */ + entity->start = st->vtime; + st->wsum += entity->weight; + bfq_get_entity(entity); + + BUG_ON(entity->on_st); + entity->on_st = 1; + } + + st = __bfq_entity_update_weight_prio(st, entity); + bfq_calc_finish(entity, entity->budget); + bfq_active_insert(st, entity); +} + +/** + * bfq_activate_entity - activate an entity and its ancestors if necessary. + * @entity: the entity to activate. + * + * Activate @entity and all the entities on the path from it to the root. + */ +static void bfq_activate_entity(struct bfq_entity *entity) +{ + struct bfq_sched_data *sd; + + for_each_entity(entity) { + __bfq_activate_entity(entity); + + sd = entity->sched_data; + if (!bfq_update_next_in_service(sd)) + /* + * No need to propagate the activation to the + * upper entities, as they will be updated when + * the in-service entity is rescheduled. + */ + break; + } +} + +/** + * __bfq_deactivate_entity - deactivate an entity from its service tree. + * @entity: the entity to deactivate. + * @requeue: if false, the entity will not be put into the idle tree. + * + * Deactivate an entity, independently from its previous state. If the + * entity was not on a service tree just return, otherwise if it is on + * any scheduler tree, extract it from that tree, and if necessary + * and if the caller did not specify @requeue, put it on the idle tree. + * + * Return %1 if the caller should update the entity hierarchy, i.e., + * if the entity was in service or if it was the next_in_service for + * its sched_data; return %0 otherwise. + */ +static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +{ + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + int was_in_service = entity == sd->in_service_entity; + int ret = 0; + + if (!entity->on_st) + return 0; + + BUG_ON(was_in_service && entity->tree != NULL); + + if (was_in_service) { + bfq_calc_finish(entity, entity->service); + sd->in_service_entity = NULL; + } else if (entity->tree == &st->active) + bfq_active_extract(st, entity); + else if (entity->tree == &st->idle) + bfq_idle_extract(st, entity); + else if (entity->tree != NULL) + BUG(); + + if (was_in_service || sd->next_in_service == entity) + ret = bfq_update_next_in_service(sd); + + if (!requeue || !bfq_gt(entity->finish, st->vtime)) + bfq_forget_entity(st, entity); + else + bfq_idle_insert(st, entity); + + BUG_ON(sd->in_service_entity == entity); + BUG_ON(sd->next_in_service == entity); + + return ret; +} + +/** + * bfq_deactivate_entity - deactivate an entity. + * @entity: the entity to deactivate. + * @requeue: true if the entity can be put on the idle tree + */ +static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +{ + struct bfq_sched_data *sd; + struct bfq_entity *parent; + + for_each_entity_safe(entity, parent) { + sd = entity->sched_data; + + if (!__bfq_deactivate_entity(entity, requeue)) + /* + * The parent entity is still backlogged, and + * we don't need to update it as it is still + * in service. + */ + break; + + if (sd->next_in_service != NULL) + /* + * The parent entity is still backlogged and + * the budgets on the path towards the root + * need to be updated. + */ + goto update; + + /* + * If we reach there the parent is no more backlogged and + * we want to propagate the dequeue upwards. 
+ */ + requeue = 1; + } + + return; + +update: + entity = parent; + for_each_entity(entity) { + __bfq_activate_entity(entity); + + sd = entity->sched_data; + if (!bfq_update_next_in_service(sd)) + break; + } +} + +/** + * bfq_update_vtime - update vtime if necessary. + * @st: the service tree to act upon. + * + * If necessary update the service tree vtime to have at least one + * eligible entity, skipping to its start time. Assumes that the + * active tree of the device is not empty. + * + * NOTE: this hierarchical implementation updates vtimes quite often, + * we may end up with reactivated processes getting timestamps after a + * vtime skip done because we needed a ->first_active entity on some + * intermediate node. + */ +static void bfq_update_vtime(struct bfq_service_tree *st) +{ + struct bfq_entity *entry; + struct rb_node *node = st->active.rb_node; + + entry = rb_entry(node, struct bfq_entity, rb_node); + if (bfq_gt(entry->min_start, st->vtime)) { + st->vtime = entry->min_start; + bfq_forget_idle(st); + } +} + +/** + * bfq_first_active_entity - find the eligible entity with + * the smallest finish time + * @st: the service tree to select from. + * + * This function searches the first schedulable entity, starting from the + * root of the tree and going on the left every time on this side there is + * a subtree with at least one eligible (start >= vtime) entity. The path on + * the right is followed only if a) the left subtree contains no eligible + * entities and b) no eligible entity has been found yet. + */ +static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) +{ + struct bfq_entity *entry, *first = NULL; + struct rb_node *node = st->active.rb_node; + + while (node != NULL) { + entry = rb_entry(node, struct bfq_entity, rb_node); +left: + if (!bfq_gt(entry->start, st->vtime)) + first = entry; + + BUG_ON(bfq_gt(entry->min_start, st->vtime)); + + if (node->rb_left != NULL) { + entry = rb_entry(node->rb_left, + struct bfq_entity, rb_node); + if (!bfq_gt(entry->min_start, st->vtime)) { + node = node->rb_left; + goto left; + } + } + if (first != NULL) + break; + node = node->rb_right; + } + + BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active)); + return first; +} + +/** + * __bfq_lookup_next_entity - return the first eligible entity in @st. + * @st: the service tree. + * + * Update the virtual time in @st and return the first eligible entity + * it contains. + */ +static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, + bool force) +{ + struct bfq_entity *entity, *new_next_in_service = NULL; + + if (RB_EMPTY_ROOT(&st->active)) + return NULL; + + bfq_update_vtime(st); + entity = bfq_first_active_entity(st); + BUG_ON(bfq_gt(entity->start, st->vtime)); + + /* + * If the chosen entity does not match with the sched_data's + * next_in_service and we are forcedly serving the IDLE priority + * class tree, bubble up budget update. + */ + if (unlikely(force && entity != entity->sched_data->next_in_service)) { + new_next_in_service = entity; + for_each_entity(new_next_in_service) + bfq_update_budget(new_next_in_service); + } + + return entity; +} + +/** + * bfq_lookup_next_entity - return the first eligible entity in @sd. + * @sd: the sched_data. + * @extract: if true the returned entity will be also extracted from @sd. 
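+ * @bfqd: the device data; if not %NULL, it is used to force service of the + * idle class when that class has not been served for longer than + * BFQ_CL_IDLE_TIMEOUT.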
+ * + * NOTE: since we cache the next_in_service entity at each level of the + * hierarchy, the complexity of the lookup can be decreased with + * absolutely no effort just returning the cached next_in_service value; + * we prefer to do full lookups to test the consistency of * the data + * structures. + */ +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + int extract, + struct bfq_data *bfqd) +{ + struct bfq_service_tree *st = sd->service_tree; + struct bfq_entity *entity; + int i = 0; + + BUG_ON(sd->in_service_entity != NULL); + + if (bfqd != NULL && + jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { + entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, + true); + if (entity != NULL) { + i = BFQ_IOPRIO_CLASSES - 1; + bfqd->bfq_class_idle_last_service = jiffies; + sd->next_in_service = entity; + } + } + for (; i < BFQ_IOPRIO_CLASSES; i++) { + entity = __bfq_lookup_next_entity(st + i, false); + if (entity != NULL) { + if (extract) { + bfq_check_next_in_service(sd, entity); + bfq_active_extract(st + i, entity); + sd->in_service_entity = entity; + sd->next_in_service = NULL; + } + break; + } + } + + return entity; +} + +/* + * Get next queue for service. + */ +static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) +{ + struct bfq_entity *entity = NULL; + struct bfq_sched_data *sd; + struct bfq_queue *bfqq; + + BUG_ON(bfqd->in_service_queue != NULL); + + if (bfqd->busy_queues == 0) + return NULL; + + sd = &bfqd->root_group->sched_data; + for (; sd != NULL; sd = entity->my_sched_data) { + entity = bfq_lookup_next_entity(sd, 1, bfqd); + BUG_ON(entity == NULL); + entity->service = 0; + } + + bfqq = bfq_entity_to_bfqq(entity); + BUG_ON(bfqq == NULL); + + return bfqq; +} + +static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) +{ + if (bfqd->in_service_bic != NULL) { + put_io_context(bfqd->in_service_bic->icq.ioc); + bfqd->in_service_bic = NULL; + } + + bfqd->in_service_queue = NULL; + del_timer(&bfqd->idle_slice_timer); +} + +static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int requeue) +{ + struct bfq_entity *entity = &bfqq->entity; + + if (bfqq == bfqd->in_service_queue) + __bfq_bfqd_reset_in_service(bfqd); + + bfq_deactivate_entity(entity, requeue); +} + +static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + + bfq_activate_entity(entity); +} + +/* + * Called when the bfqq no longer has requests pending, remove it from + * the service tree. + */ +static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int requeue) +{ + BUG_ON(!bfq_bfqq_busy(bfqq)); + BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + + bfq_log_bfqq(bfqd, bfqq, "del from busy"); + + bfq_clear_bfqq_busy(bfqq); + + BUG_ON(bfqd->busy_queues == 0); + bfqd->busy_queues--; + + if (!bfqq->dispatched) { + bfq_weights_tree_remove(bfqd, &bfqq->entity, + &bfqd->queue_weights_tree); + if (!blk_queue_nonrot(bfqd->queue)) { + BUG_ON(!bfqd->busy_in_flight_queues); + bfqd->busy_in_flight_queues--; + if (bfq_bfqq_constantly_seeky(bfqq)) { + BUG_ON(!bfqd-> + const_seeky_busy_in_flight_queues); + bfqd->const_seeky_busy_in_flight_queues--; + } + } + } + if (bfqq->wr_coeff > 1) + bfqd->wr_busy_queues--; + + bfq_deactivate_bfqq(bfqd, bfqq, requeue); +} + +/* + * Called when an inactive queue receives a new request. 
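+ * The queue is activated in the scheduler hierarchy and marked busy; the + * counters used by the weight-raising and rotational-device heuristics + * are updated accordingly.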
+ */ +static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + BUG_ON(bfq_bfqq_busy(bfqq)); + BUG_ON(bfqq == bfqd->in_service_queue); + + bfq_log_bfqq(bfqd, bfqq, "add to busy"); + + bfq_activate_bfqq(bfqd, bfqq); + + bfq_mark_bfqq_busy(bfqq); + bfqd->busy_queues++; + + if (!bfqq->dispatched) { + if (bfqq->wr_coeff == 1) + bfq_weights_tree_add(bfqd, &bfqq->entity, + &bfqd->queue_weights_tree); + if (!blk_queue_nonrot(bfqd->queue)) { + bfqd->busy_in_flight_queues++; + if (bfq_bfqq_constantly_seeky(bfqq)) + bfqd->const_seeky_busy_in_flight_queues++; + } + } + if (bfqq->wr_coeff > 1) + bfqd->wr_busy_queues++; +} diff --git a/block/bfq.h b/block/bfq.h new file mode 100644 index 000000000..0ea164d41 --- /dev/null +++ b/block/bfq.h @@ -0,0 +1,807 @@ +/* + * BFQ-v7r8 for 4.0.0: data structures and common functions prototypes. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2010 Paolo Valente + */ + +#ifndef _BFQ_H +#define _BFQ_H + +#include <linux/blktrace_api.h> +#include <linux/hrtimer.h> +#include <linux/ioprio.h> +#include <linux/rbtree.h> + +#define BFQ_IOPRIO_CLASSES 3 +#define BFQ_CL_IDLE_TIMEOUT (HZ/5) + +#define BFQ_MIN_WEIGHT 1 +#define BFQ_MAX_WEIGHT 1000 + +#define BFQ_DEFAULT_QUEUE_IOPRIO 4 + +#define BFQ_DEFAULT_GRP_WEIGHT 10 +#define BFQ_DEFAULT_GRP_IOPRIO 0 +#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE + +struct bfq_entity; + +/** + * struct bfq_service_tree - per ioprio_class service tree. + * @active: tree for active entities (i.e., those backlogged). + * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i). + * @first_idle: idle entity with minimum F_i. + * @last_idle: idle entity with maximum F_i. + * @vtime: scheduler virtual time. + * @wsum: scheduler weight sum; active and idle entities contribute to it. + * + * Each service tree represents a B-WF2Q+ scheduler on its own. Each + * ioprio_class has its own independent scheduler, and so its own + * bfq_service_tree. All the fields are protected by the queue lock + * of the containing bfqd. + */ +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + + u64 vtime; + unsigned long wsum; +}; + +/** + * struct bfq_sched_data - multi-class scheduler. + * @in_service_entity: entity in service. + * @next_in_service: head-of-the-line entity in the scheduler. + * @service_tree: array of service trees, one per ioprio_class. + * + * bfq_sched_data is the basic scheduler queue. It supports three + * ioprio_classes, and can be used either as a toplevel queue or as + * an intermediate queue on a hierarchical setup. + * @next_in_service points to the active entity of the sched_data + * service trees that will be scheduled next. + * + * The supported ioprio_classes are the same as in CFQ, in descending + * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. + * Requests from higher priority queues are served before all the + * requests from lower priority queues; among requests of the same + * queue requests are served according to B-WF2Q+. + * All the fields are protected by the queue lock of the containing bfqd. + */ +struct bfq_sched_data { + struct bfq_entity *in_service_entity; + struct bfq_entity *next_in_service; + struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; +}; + +/** + * struct bfq_weight_counter - counter of the number of all active entities + * with a given weight. + * @weight: weight of the entities that this counter refers to.
+ * @num_active: number of active entities with this weight. + * @weights_node: weights tree member (see bfq_data's @queue_weights_tree + * and @group_weights_tree). + */ +struct bfq_weight_counter { + short int weight; + unsigned int num_active; + struct rb_node weights_node; +}; + +/** + * struct bfq_entity - schedulable entity. + * @rb_node: service_tree member. + * @weight_counter: pointer to the weight counter associated with this entity. + * @on_st: flag, true if the entity is on a tree (either the active or + * the idle one of its service_tree). + * @finish: B-WF2Q+ finish timestamp (aka F_i). + * @start: B-WF2Q+ start timestamp (aka S_i). + * @tree: tree the entity is enqueued into; %NULL if not on a tree. + * @min_start: minimum start time of the (active) subtree rooted at + * this entity; used for O(log N) lookups into active trees. + * @service: service received during the last round of service. + * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight. + * @weight: weight of the queue + * @parent: parent entity, for hierarchical scheduling. + * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the + * associated scheduler queue, %NULL on leaf nodes. + * @sched_data: the scheduler queue this entity belongs to. + * @ioprio: the ioprio in use. + * @new_weight: when a weight change is requested, the new weight value. + * @orig_weight: original weight, used to implement weight boosting + * @new_ioprio: when an ioprio change is requested, the new ioprio value. + * @ioprio_class: the ioprio_class in use. + * @new_ioprio_class: when an ioprio_class change is requested, the new + * ioprio_class value. + * @ioprio_changed: flag, true when the user requested a weight, ioprio or + * ioprio_class change. + * + * A bfq_entity is used to represent either a bfq_queue (leaf node in the + * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each + * entity belongs to the sched_data of the parent group in the cgroup + * hierarchy. Non-leaf entities have also their own sched_data, stored + * in @my_sched_data. + * + * Each entity stores independently its priority values; this would + * allow different weights on different devices, but this + * functionality is not exported to userspace by now. Priorities and + * weights are updated lazily, first storing the new values into the + * new_* fields, then setting the @ioprio_changed flag. As soon as + * there is a transition in the entity state that allows the priority + * update to take place the effective and the requested priority + * values are synchronized. + * + * Unless cgroups are used, the weight value is calculated from the + * ioprio to export the same interface as CFQ. When dealing with + * ``well-behaved'' queues (i.e., queues that do not spend too much + * time to consume their budget and have true sequential behavior, and + * when there are no external factors breaking anticipation) the + * relative weights at each level of the cgroups hierarchy should be + * guaranteed. All the fields are protected by the queue lock of the + * containing bfqd. 
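+ * As a concrete example of the default ioprio-based mapping (see + * bfq_ioprio_to_weight()), an ioprio of 0, the highest best-effort + * priority, yields weight IOPRIO_BE_NR - 0 = 8, while the default + * ioprio 4 yields weight 4; while a queue is weight-raised, its + * effective @weight is further multiplied by the queue's wr_coeff.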
+ */ +struct bfq_entity { + struct rb_node rb_node; + struct bfq_weight_counter *weight_counter; + + int on_st; + + u64 finish; + u64 start; + + struct rb_root *tree; + + u64 min_start; + + unsigned long service, budget; + unsigned short weight, new_weight; + unsigned short orig_weight; + + struct bfq_entity *parent; + + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + + unsigned short ioprio, new_ioprio; + unsigned short ioprio_class, new_ioprio_class; + + int ioprio_changed; +}; + +struct bfq_group; + +/** + * struct bfq_queue - leaf schedulable entity. + * @ref: reference counter. + * @bfqd: parent bfq_data. + * @new_bfqq: shared bfq_queue if queue is cooperating with + * one or more other queues. + * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree). + * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree). + * @sort_list: sorted list of pending requests. + * @next_rq: if fifo isn't expired, next request to serve. + * @queued: nr of requests queued in @sort_list. + * @allocated: currently allocated requests. + * @meta_pending: pending metadata requests. + * @fifo: fifo list of requests in sort_list. + * @entity: entity representing this queue in the scheduler. + * @max_budget: maximum budget allowed from the feedback mechanism. + * @budget_timeout: budget expiration (in jiffies). + * @dispatched: number of requests on the dispatch list or inside driver. + * @flags: status flags. + * @bfqq_list: node for active/idle bfqq list inside our bfqd. + * @burst_list_node: node for the device's burst list. + * @seek_samples: number of seeks sampled + * @seek_total: sum of the distances of the seeks sampled + * @seek_mean: mean seek distance + * @last_request_pos: position of the last request enqueued + * @requests_within_timer: number of consecutive pairs of request completion + * and arrival, such that the queue becomes idle + * after the completion, but the next request arrives + * within an idle time slice; used only if the queue's + * IO_bound has been cleared. + * @pid: pid of the process owning the queue, used for logging purposes. + * @last_wr_start_finish: start time of the current weight-raising period if + * the @bfq-queue is being weight-raised, otherwise + * finish time of the last weight-raising period + * @wr_cur_max_time: current max raising time for this queue + * @soft_rt_next_start: minimum time instant such that, only if a new + * request is enqueued after this time instant in an + * idle @bfq_queue with no outstanding requests, then + * the task associated with the queue it is deemed as + * soft real-time (see the comments to the function + * bfq_bfqq_softrt_next_start()) + * @last_idle_bklogged: time of the last transition of the @bfq_queue from + * idle to backlogged + * @service_from_backlogged: cumulative service received from the @bfq_queue + * since the last transition from idle to + * backlogged + * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the + * queue is shared + * + * A bfq_queue is a leaf request queue; it can be associated with an + * io_context or more, if it is async or shared between cooperating + * processes. @cgroup holds a reference to the cgroup, to be sure that it + * does not disappear while a bfqq still references it (mostly to avoid + * races between request issuing and task migration followed by cgroup + * destruction). + * All the fields are protected by the queue lock of the containing bfqd. 
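+ * Two-element array fields, such as @queued and @allocated, are indexed + * by the synchronicity of the requests they account for: index 0 for + * async requests and index 1 for sync requests (cf. @bfq_fifo_expire and + * @bfq_timeout in struct bfq_data).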
+ */ +struct bfq_queue { + atomic_t ref; + struct bfq_data *bfqd; + + /* fields for cooperating queues handling */ + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int allocated[2]; + int meta_pending; + struct list_head fifo; + + struct bfq_entity entity; + + unsigned long max_budget; + unsigned long budget_timeout; + + int dispatched; + + unsigned int flags; + + struct list_head bfqq_list; + + struct hlist_node burst_list_node; + + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + sector_t last_request_pos; + + unsigned int requests_within_timer; + + pid_t pid; + struct bfq_io_cq *bic; + + /* weight-raising fields */ + unsigned long wr_cur_max_time; + unsigned long soft_rt_next_start; + unsigned long last_wr_start_finish; + unsigned int wr_coeff; + unsigned long last_idle_bklogged; + unsigned long service_from_backlogged; +}; + +/** + * struct bfq_ttime - per process thinktime stats. + * @ttime_total: total process thinktime + * @ttime_samples: number of thinktime samples + * @ttime_mean: average process thinktime + */ +struct bfq_ttime { + unsigned long last_end_request; + + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; +}; + +/** + * struct bfq_io_cq - per (request_queue, io_context) structure. + * @icq: associated io_cq structure + * @bfqq: array of two process queues, the sync and the async + * @ttime: associated @bfq_ttime struct + * @wr_time_left: snapshot of the time left before weight raising ends + * for the sync queue associated to this process; this + * snapshot is taken to remember this value while the weight + * raising is suspended because the queue is merged with a + * shared queue, and is used to set @raising_cur_max_time + * when the queue is split from the shared queue and its + * weight is raised again + * @saved_idle_window: same purpose as the previous field for the idle + * window + * @saved_IO_bound: same purpose as the previous two fields for the I/O + * bound classification of a queue + * @saved_in_large_burst: same purpose as the previous fields for the + * value of the field keeping the queue's belonging + * to a large burst + * @was_in_burst_list: true if the queue belonged to a burst list + * before its merge with another cooperating queue + * @cooperations: counter of consecutive successful queue merges underwent + * by any of the process' @bfq_queues + * @failed_cooperations: counter of consecutive failed queue merges of any + * of the process' @bfq_queues + */ +struct bfq_io_cq { + struct io_cq icq; /* must be the first member */ + struct bfq_queue *bfqq[2]; + struct bfq_ttime ttime; + int ioprio; + + unsigned int wr_time_left; + bool saved_idle_window; + bool saved_IO_bound; + + bool saved_in_large_burst; + bool was_in_burst_list; + + unsigned int cooperations; + unsigned int failed_cooperations; +}; + +enum bfq_device_speed { + BFQ_BFQD_FAST, + BFQ_BFQD_SLOW, +}; + +/** + * struct bfq_data - per device data structure. + * @queue: request queue for the managed device. + * @root_group: root bfq_group for the device. + * @rq_pos_tree: rbtree sorted by next_request position, used when + * determining if two or more queues have interleaving + * requests (see bfq_close_cooperator()). + * @active_numerous_groups: number of bfq_groups containing more than one + * active @bfq_entity. + * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by + * weight. 
Used to keep track of whether all @bfq_queues + * have the same weight. The tree contains one counter + * for each distinct weight associated to some active + * and not weight-raised @bfq_queue (see the comments to + * the functions bfq_weights_tree_[add|remove] for + * further details). + * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted + * by weight. Used to keep track of whether all + * @bfq_groups have the same weight. The tree contains + * one counter for each distinct weight associated to + * some active @bfq_group (see the comments to the + * functions bfq_weights_tree_[add|remove] for further + * details). + * @busy_queues: number of bfq_queues containing requests (including the + * queue in service, even if it is idling). + * @busy_in_flight_queues: number of @bfq_queues containing pending or + * in-flight requests, plus the @bfq_queue in + * service, even if idle but waiting for the + * possible arrival of its next sync request. This + * field is updated only if the device is rotational, + * but used only if the device is also NCQ-capable. + * The reason why the field is updated also for non- + * NCQ-capable rotational devices is related to the + * fact that the value of @hw_tag may be set also + * later than when busy_in_flight_queues may need to + * be incremented for the first time(s). Taking also + * this possibility into account, to avoid unbalanced + * increments/decrements, would imply more overhead + * than just updating busy_in_flight_queues + * regardless of the value of @hw_tag. + * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues + * (that is, seeky queues that expired + * for budget timeout at least once) + * containing pending or in-flight + * requests, including the in-service + * @bfq_queue if constantly seeky. This + * field is updated only if the device + * is rotational, but used only if the + * device is also NCQ-capable (see the + * comments to @busy_in_flight_queues). + * @wr_busy_queues: number of weight-raised busy @bfq_queues. + * @queued: number of queued requests. + * @rq_in_driver: number of requests dispatched and waiting for completion. + * @sync_flight: number of sync requests in the driver. + * @max_rq_in_driver: max number of reqs in driver in the last + * @hw_tag_samples completed requests. + * @hw_tag_samples: nr of samples used to calculate hw_tag. + * @hw_tag: flag set to one if the driver is showing a queueing behavior. + * @budgets_assigned: number of budgets assigned. + * @idle_slice_timer: timer set when idling for the next sequential request + * from the queue in service. + * @unplug_work: delayed work to restart dispatching on the request queue. + * @in_service_queue: bfq_queue in service. + * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue. + * @last_position: on-disk position of the last served request. + * @last_budget_start: beginning of the last budget. + * @last_idling_start: beginning of the last idle slice. + * @peak_rate: peak transfer rate observed for a budget. + * @peak_rate_samples: number of samples used to calculate @peak_rate. + * @bfq_max_budget: maximum budget allotted to a bfq_queue before + * rescheduling. + * @group_list: list of all the bfq_groups active on the device. + * @active_list: list of all the bfq_queues active on the device. + * @idle_list: list of all the bfq_queues idle on the device. + * @bfq_fifo_expire: timeout for async/sync requests; when it expires + * requests are served in fifo order. 
+ * @bfq_back_penalty: weight of backward seeks wrt forward ones. + * @bfq_back_max: maximum allowed backward seek. + * @bfq_slice_idle: maximum idling time. + * @bfq_user_max_budget: user-configured max budget value + * (0 for auto-tuning). + * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to + * async queues. + * @bfq_timeout: timeout for bfq_queues to consume their budget; used to + * to prevent seeky queues to impose long latencies to well + * behaved ones (this also implies that seeky queues cannot + * receive guarantees in the service domain; after a timeout + * they are charged for the whole allocated budget, to try + * to preserve a behavior reasonably fair among them, but + * without service-domain guarantees). + * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is + * no more granted any weight-raising. + * @bfq_failed_cooperations: number of consecutive failed cooperation + * chances after which weight-raising is restored + * to a queue subject to more than bfq_coop_thresh + * queue merges. + * @bfq_requests_within_timer: number of consecutive requests that must be + * issued within the idle time slice to set + * again idling to a queue which was marked as + * non-I/O-bound (see the definition of the + * IO_bound flag for further details). + * @last_ins_in_burst: last time at which a queue entered the current + * burst of queues being activated shortly after + * each other; for more details about this and the + * following parameters related to a burst of + * activations, see the comments to the function + * @bfq_handle_burst. + * @bfq_burst_interval: reference time interval used to decide whether a + * queue has been activated shortly after + * @last_ins_in_burst. + * @burst_size: number of queues in the current burst of queue activations. + * @bfq_large_burst_thresh: maximum burst size above which the current + * queue-activation burst is deemed as 'large'. + * @large_burst: true if a large queue-activation burst is in progress. + * @burst_list: head of the burst list (as for the above fields, more details + * in the comments to the function bfq_handle_burst). + * @low_latency: if set to true, low-latency heuristics are enabled. + * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised + * queue is multiplied. + * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies). + * @bfq_wr_rt_max_time: maximum duration for soft real-time processes. + * @bfq_wr_min_idle_time: minimum idle period after which weight-raising + * may be reactivated for a queue (in jiffies). + * @bfq_wr_min_inter_arr_async: minimum period between request arrivals + * after which weight-raising may be + * reactivated for an already busy queue + * (in jiffies). + * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue, + * sectors per seconds. + * @RT_prod: cached value of the product R*T used for computing the maximum + * duration of the weight raising automatically. + * @device_speed: device-speed class for the low-latency heuristic. + * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions. + * + * All the fields are protected by the @queue lock. 
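+ * Time-valued tunables (@bfq_fifo_expire, @bfq_slice_idle, @bfq_timeout + * and the weight-raising durations) are stored internally in jiffies; + * the sysfs show/store helpers convert them to and from milliseconds.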
+ */ +struct bfq_data { + struct request_queue *queue; + + struct bfq_group *root_group; + struct rb_root rq_pos_tree; + +#ifdef CONFIG_CGROUP_BFQIO + int active_numerous_groups; +#endif + + struct rb_root queue_weights_tree; + struct rb_root group_weights_tree; + + int busy_queues; + int busy_in_flight_queues; + int const_seeky_busy_in_flight_queues; + int wr_busy_queues; + int queued; + int rq_in_driver; + int sync_flight; + + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + + int budgets_assigned; + + struct timer_list idle_slice_timer; + struct work_struct unplug_work; + + struct bfq_queue *in_service_queue; + struct bfq_io_cq *in_service_bic; + + sector_t last_position; + + ktime_t last_budget_start; + ktime_t last_idling_start; + int peak_rate_samples; + u64 peak_rate; + unsigned long bfq_max_budget; + + struct hlist_head group_list; + struct list_head active_list; + struct list_head idle_list; + + unsigned int bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + unsigned int bfq_slice_idle; + u64 bfq_class_idle_last_service; + + unsigned int bfq_user_max_budget; + unsigned int bfq_max_budget_async_rq; + unsigned int bfq_timeout[2]; + + unsigned int bfq_coop_thresh; + unsigned int bfq_failed_cooperations; + unsigned int bfq_requests_within_timer; + + unsigned long last_ins_in_burst; + unsigned long bfq_burst_interval; + int burst_size; + unsigned long bfq_large_burst_thresh; + bool large_burst; + struct hlist_head burst_list; + + bool low_latency; + + /* parameters of the low_latency heuristics */ + unsigned int bfq_wr_coeff; + unsigned int bfq_wr_max_time; + unsigned int bfq_wr_rt_max_time; + unsigned int bfq_wr_min_idle_time; + unsigned long bfq_wr_min_inter_arr_async; + unsigned int bfq_wr_max_softrt_rate; + u64 RT_prod; + enum bfq_device_speed device_speed; + + struct bfq_queue oom_bfqq; +}; + +enum bfqq_state_flags { + BFQ_BFQQ_FLAG_busy = 0, /* has requests or is in service */ + BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ + BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ + BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ + BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */ + BFQ_BFQQ_FLAG_sync, /* synchronous queue */ + BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */ + BFQ_BFQQ_FLAG_IO_bound, /* + * bfqq has timed-out at least once + * having consumed at most 2/10 of + * its budget + */ + BFQ_BFQQ_FLAG_in_large_burst, /* + * bfqq activated in a large burst, + * see comments to bfq_handle_burst. 
+ */ + BFQ_BFQQ_FLAG_constantly_seeky, /* + * bfqq has proved to be slow and + * seeky until budget timeout + */ + BFQ_BFQQ_FLAG_softrt_update, /* + * may need softrt-next-start + * update + */ + BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ + BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */ + BFQ_BFQQ_FLAG_just_split, /* queue has just been split */ +}; + +#define BFQ_BFQQ_FNS(name) \ +static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ +{ \ + (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ +} \ +static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ +{ \ + (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ +} \ +static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ +{ \ + return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ +} + +BFQ_BFQQ_FNS(busy); +BFQ_BFQQ_FNS(wait_request); +BFQ_BFQQ_FNS(must_alloc); +BFQ_BFQQ_FNS(fifo_expire); +BFQ_BFQQ_FNS(idle_window); +BFQ_BFQQ_FNS(sync); +BFQ_BFQQ_FNS(budget_new); +BFQ_BFQQ_FNS(IO_bound); +BFQ_BFQQ_FNS(in_large_burst); +BFQ_BFQQ_FNS(constantly_seeky); +BFQ_BFQQ_FNS(coop); +BFQ_BFQQ_FNS(split_coop); +BFQ_BFQQ_FNS(just_split); +BFQ_BFQQ_FNS(softrt_update); +#undef BFQ_BFQQ_FNS + +/* Logging facilities. */ +#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args) + +#define bfq_log(bfqd, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) + +/* Expiration reasons. */ +enum bfqq_expiration { + BFQ_BFQQ_TOO_IDLE = 0, /* + * queue has been idling for + * too long + */ + BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ + BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ + BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ +}; + +#ifdef CONFIG_CGROUP_BFQIO +/** + * struct bfq_group - per (device, cgroup) data structure. + * @entity: schedulable entity to insert into the parent group sched_data. + * @sched_data: own sched_data, to contain child entities (they may be + * both bfq_queues and bfq_groups). + * @group_node: node to be inserted into the bfqio_cgroup->group_data + * list of the containing cgroup's bfqio_cgroup. + * @bfqd_node: node to be inserted into the @bfqd->group_list list + * of the groups active on the same device; used for cleanup. + * @bfqd: the bfq_data for the device this group acts upon. + * @async_bfqq: array of async queues for all the tasks belonging to + * the group, one queue per ioprio value per ioprio_class, + * except for the idle class that has only one queue. + * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). + * @my_entity: pointer to @entity, %NULL for the toplevel group; used + * to avoid too many special cases during group creation/ + * migration. + * @active_entities: number of active entities belonging to the group; + * unused for the root group. Used to know whether there + * are groups with more than one active @bfq_entity + * (see the comments to the function + * bfq_bfqq_must_not_expire()). + * + * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup + * there is a set of bfq_groups, each one collecting the lower-level + * entities belonging to the group that are acting on the same device. + * + * Locking works as follows: + * o @group_node is protected by the bfqio_cgroup lock, and is accessed + * via RCU from its readers. + * o @bfqd is protected by the queue lock, RCU is used to access it + * from the readers. + * o All the other fields are protected by the @bfqd queue lock. 
+ */ +struct bfq_group { + struct bfq_entity entity; + struct bfq_sched_data sched_data; + + struct hlist_node group_node; + struct hlist_node bfqd_node; + + void *bfqd; + + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; + struct bfq_queue *async_idle_bfqq; + + struct bfq_entity *my_entity; + + int active_entities; +}; + +/** + * struct bfqio_cgroup - bfq cgroup data structure. + * @css: subsystem state for bfq in the containing cgroup. + * @online: flag marked when the subsystem is inserted. + * @weight: cgroup weight. + * @ioprio: cgroup ioprio. + * @ioprio_class: cgroup ioprio_class. + * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data. + * @group_data: list containing the bfq_group belonging to this cgroup. + * + * @group_data is accessed using RCU, with @lock protecting the updates, + * @ioprio and @ioprio_class are protected by @lock. + */ +struct bfqio_cgroup { + struct cgroup_subsys_state css; + bool online; + + unsigned short weight, ioprio, ioprio_class; + + spinlock_t lock; + struct hlist_head group_data; +}; +#else +struct bfq_group { + struct bfq_sched_data sched_data; + + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; + struct bfq_queue *async_idle_bfqq; +}; +#endif + +static inline struct bfq_service_tree * +bfq_entity_service_tree(struct bfq_entity *entity) +{ + struct bfq_sched_data *sched_data = entity->sched_data; + unsigned int idx = entity->ioprio_class - 1; + + BUG_ON(idx >= BFQ_IOPRIO_CLASSES); + BUG_ON(sched_data == NULL); + + return sched_data->service_tree + idx; +} + +static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, + bool is_sync) +{ + return bic->bfqq[is_sync]; +} + +static inline void bic_set_bfqq(struct bfq_io_cq *bic, + struct bfq_queue *bfqq, bool is_sync) +{ + bic->bfqq[is_sync] = bfqq; +} + +static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) +{ + return bic->icq.q->elevator->elevator_data; +} + +/** + * bfq_get_bfqd_locked - get a lock to a bfqd using a RCU protected pointer. + * @ptr: a pointer to a bfqd. + * @flags: storage for the flags to be saved. + * + * This function allows bfqg->bfqd to be protected by the + * queue lock of the bfqd they reference; the pointer is dereferenced + * under RCU, so the storage for bfqd is assured to be safe as long + * as the RCU read side critical section does not end. After the + * bfqd->queue->queue_lock is taken the pointer is rechecked, to be + * sure that no other writer accessed it. If we raced with a writer, + * the function returns NULL, with the queue unlocked, otherwise it + * returns the dereferenced pointer, with the queue locked. 
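+ *
+ * A minimal usage sketch (hypothetical caller, not part of this file),
+ * assuming bfqg is a valid struct bfq_group whose ->bfqd pointer is being
+ * dereferenced:
+ *
+ *	unsigned long flags;
+ *	struct bfq_data *bfqd;
+ *
+ *	bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
+ *	if (bfqd != NULL) {
+ *		... work while holding bfqd->queue->queue_lock ...
+ *		bfq_put_bfqd_unlock(bfqd, &flags);
+ *	}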
+ */ +static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr, + unsigned long *flags) +{ + struct bfq_data *bfqd; + + rcu_read_lock(); + bfqd = rcu_dereference(*(struct bfq_data **)ptr); + + if (bfqd != NULL) { + spin_lock_irqsave(bfqd->queue->queue_lock, *flags); + if (*ptr == bfqd) + goto out; + spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); + } + + bfqd = NULL; +out: + rcu_read_unlock(); + return bfqd; +} + +static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd, + unsigned long *flags) +{ + spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); +} + +static void bfq_check_ioprio_change(struct bfq_io_cq *bic); +static void bfq_put_queue(struct bfq_queue *bfqq); +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, int is_sync, + struct bfq_io_cq *bic, gfp_t gfp_mask); +static void bfq_end_wr_async_queues(struct bfq_data *bfqd, + struct bfq_group *bfqg); +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); + +#endif /* _BFQ_H */ diff --git a/block/bio-integrity.c b/block/bio-integrity.c new file mode 100644 index 000000000..5cbd5d9ea --- /dev/null +++ b/block/bio-integrity.c @@ -0,0 +1,517 @@ +/* + * bio-integrity.c - bio data integrity extensions + * + * Copyright (C) 2007, 2008, 2009 Oracle Corporation + * Written by: Martin K. Petersen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#include +#include +#include +#include +#include +#include + +#define BIP_INLINE_VECS 4 + +static struct kmem_cache *bip_slab; +static struct workqueue_struct *kintegrityd_wq; + +/** + * bio_integrity_alloc - Allocate integrity payload and attach it to bio + * @bio: bio to attach integrity metadata to + * @gfp_mask: Memory allocation mask + * @nr_vecs: Number of integrity metadata scatter-gather elements + * + * Description: This function prepares a bio for attaching integrity + * metadata. nr_vecs specifies the maximum number of pages containing + * integrity metadata that can be attached. 
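+ *
+ * A minimal usage sketch (hypothetical caller, not part of this file),
+ * assuming prot_buf/prot_len describe an already-allocated buffer holding
+ * the protection information for @bio:
+ *
+ *	struct bio_integrity_payload *bip;
+ *
+ *	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+ *	if (bip)
+ *		bio_integrity_add_page(bio, virt_to_page(prot_buf),
+ *				       prot_len, offset_in_page(prot_buf));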
+ */ +struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, + gfp_t gfp_mask, + unsigned int nr_vecs) +{ + struct bio_integrity_payload *bip; + struct bio_set *bs = bio->bi_pool; + unsigned long idx = BIO_POOL_NONE; + unsigned inline_vecs; + + if (!bs) { + bip = kmalloc(sizeof(struct bio_integrity_payload) + + sizeof(struct bio_vec) * nr_vecs, gfp_mask); + inline_vecs = nr_vecs; + } else { + bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); + inline_vecs = BIP_INLINE_VECS; + } + + if (unlikely(!bip)) + return NULL; + + memset(bip, 0, sizeof(*bip)); + + if (nr_vecs > inline_vecs) { + bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, + bs->bvec_integrity_pool); + if (!bip->bip_vec) + goto err; + bip->bip_max_vcnt = bvec_nr_vecs(idx); + } else { + bip->bip_vec = bip->bip_inline_vecs; + bip->bip_max_vcnt = inline_vecs; + } + + bip->bip_slab = idx; + bip->bip_bio = bio; + bio->bi_integrity = bip; + bio->bi_rw |= REQ_INTEGRITY; + + return bip; +err: + mempool_free(bip, bs->bio_integrity_pool); + return NULL; +} +EXPORT_SYMBOL(bio_integrity_alloc); + +/** + * bio_integrity_free - Free bio integrity payload + * @bio: bio containing bip to be freed + * + * Description: Used to free the integrity portion of a bio. Usually + * called from bio_free(). + */ +void bio_integrity_free(struct bio *bio) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_set *bs = bio->bi_pool; + + if (bip->bip_flags & BIP_BLOCK_INTEGRITY) + kfree(page_address(bip->bip_vec->bv_page) + + bip->bip_vec->bv_offset); + + if (bs) { + if (bip->bip_slab != BIO_POOL_NONE) + bvec_free(bs->bvec_integrity_pool, bip->bip_vec, + bip->bip_slab); + + mempool_free(bip, bs->bio_integrity_pool); + } else { + kfree(bip); + } + + bio->bi_integrity = NULL; +} +EXPORT_SYMBOL(bio_integrity_free); + +/** + * bio_integrity_add_page - Attach integrity metadata + * @bio: bio to update + * @page: page containing integrity metadata + * @len: number of bytes of integrity metadata in page + * @offset: start offset within page + * + * Description: Attach a page containing integrity metadata to bio. + */ +int bio_integrity_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_vec *iv; + + if (bip->bip_vcnt >= bip->bip_max_vcnt) { + printk(KERN_ERR "%s: bip_vec full\n", __func__); + return 0; + } + + iv = bip->bip_vec + bip->bip_vcnt; + + iv->bv_page = page; + iv->bv_len = len; + iv->bv_offset = offset; + bip->bip_vcnt++; + + return len; +} +EXPORT_SYMBOL(bio_integrity_add_page); + +/** + * bio_integrity_enabled - Check whether integrity can be passed + * @bio: bio to check + * + * Description: Determines whether bio_integrity_prep() can be called + * on this bio or not. bio data direction and target device must be + * set prior to calling. The functions honors the write_generate and + * read_verify flags in sysfs. + */ +bool bio_integrity_enabled(struct bio *bio) +{ + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + + if (!bio_is_rw(bio)) + return false; + + /* Already protected? 
*/ + if (bio_integrity(bio)) + return false; + + if (bi == NULL) + return false; + + if (bio_data_dir(bio) == READ && bi->verify_fn != NULL && + (bi->flags & BLK_INTEGRITY_VERIFY)) + return true; + + if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL && + (bi->flags & BLK_INTEGRITY_GENERATE)) + return true; + + return false; +} +EXPORT_SYMBOL(bio_integrity_enabled); + +/** + * bio_integrity_intervals - Return number of integrity intervals for a bio + * @bi: blk_integrity profile for device + * @sectors: Size of the bio in 512-byte sectors + * + * Description: The block layer calculates everything in 512 byte + * sectors but integrity metadata is done in terms of the data integrity + * interval size of the storage device. Convert the block layer sectors + * to the appropriate number of integrity intervals. + */ +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return sectors >> (ilog2(bi->interval) - 9); +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_intervals(bi, sectors) * bi->tuple_size; +} + +/** + * bio_integrity_process - Process integrity metadata for a bio + * @bio: bio to generate/verify integrity metadata for + * @proc_fn: Pointer to the relevant processing function + */ +static int bio_integrity_process(struct bio *bio, + integrity_processing_fn *proc_fn) +{ + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity_iter iter; + struct bvec_iter bviter; + struct bio_vec bv; + struct bio_integrity_payload *bip = bio_integrity(bio); + unsigned int ret = 0; + void *prot_buf = page_address(bip->bip_vec->bv_page) + + bip->bip_vec->bv_offset; + + iter.disk_name = bio->bi_bdev->bd_disk->disk_name; + iter.interval = bi->interval; + iter.seed = bip_get_seed(bip); + iter.prot_buf = prot_buf; + + bio_for_each_segment(bv, bio, bviter) { + void *kaddr = kmap_atomic(bv.bv_page); + + iter.data_buf = kaddr + bv.bv_offset; + iter.data_size = bv.bv_len; + + ret = proc_fn(&iter); + if (ret) { + kunmap_atomic(kaddr); + return ret; + } + + kunmap_atomic(kaddr); + } + return ret; +} + +/** + * bio_integrity_prep - Prepare bio for integrity I/O + * @bio: bio to prepare + * + * Description: Allocates a buffer for integrity metadata, maps the + * pages and attaches them to a bio. The bio must have data + * direction, target device and start sector set priot to calling. In + * the WRITE case, integrity metadata will be generated using the + * block device's integrity function. In the READ case, the buffer + * will be prepared for DMA and a suitable end_io handler set up. 
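+ *
+ * Illustrative sketch of the expected call site (a simplification of what
+ * a submission path might do, not part of this function): a non-zero
+ * return is treated as failure and the bio is completed with an error:
+ *
+ *	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ *		bio_endio(bio, -EIO);
+ *		return;
+ *	}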
+ */ +int bio_integrity_prep(struct bio *bio) +{ + struct bio_integrity_payload *bip; + struct blk_integrity *bi; + struct request_queue *q; + void *buf; + unsigned long start, end; + unsigned int len, nr_pages; + unsigned int bytes, offset, i; + unsigned int intervals; + + bi = bdev_get_integrity(bio->bi_bdev); + q = bdev_get_queue(bio->bi_bdev); + BUG_ON(bi == NULL); + BUG_ON(bio_integrity(bio)); + + intervals = bio_integrity_intervals(bi, bio_sectors(bio)); + + /* Allocate kernel buffer for protection data */ + len = intervals * bi->tuple_size; + buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); + if (unlikely(buf == NULL)) { + printk(KERN_ERR "could not allocate integrity buffer\n"); + return -ENOMEM; + } + + end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = ((unsigned long) buf) >> PAGE_SHIFT; + nr_pages = end - start; + + /* Allocate bio integrity payload and integrity vectors */ + bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); + if (unlikely(bip == NULL)) { + printk(KERN_ERR "could not allocate data integrity bioset\n"); + kfree(buf); + return -EIO; + } + + bip->bip_flags |= BIP_BLOCK_INTEGRITY; + bip->bip_iter.bi_size = len; + bip_set_seed(bip, bio->bi_iter.bi_sector); + + if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM) + bip->bip_flags |= BIP_IP_CHECKSUM; + + /* Map it */ + offset = offset_in_page(buf); + for (i = 0 ; i < nr_pages ; i++) { + int ret; + bytes = PAGE_SIZE - offset; + + if (len <= 0) + break; + + if (bytes > len) + bytes = len; + + ret = bio_integrity_add_page(bio, virt_to_page(buf), + bytes, offset); + + if (ret == 0) + return 0; + + if (ret < bytes) + break; + + buf += bytes; + len -= bytes; + offset = 0; + } + + /* Install custom I/O completion handler if read verify is enabled */ + if (bio_data_dir(bio) == READ) { + bip->bip_end_io = bio->bi_end_io; + bio->bi_end_io = bio_integrity_endio; + } + + /* Auto-generate integrity metadata if this is a write */ + if (bio_data_dir(bio) == WRITE) + bio_integrity_process(bio, bi->generate_fn); + + return 0; +} +EXPORT_SYMBOL(bio_integrity_prep); + +/** + * bio_integrity_verify_fn - Integrity I/O completion worker + * @work: Work struct stored in bio to be verified + * + * Description: This workqueue function is called to complete a READ + * request. The function verifies the transferred integrity metadata + * and then calls the original bio end_io function. + */ +static void bio_integrity_verify_fn(struct work_struct *work) +{ + struct bio_integrity_payload *bip = + container_of(work, struct bio_integrity_payload, bip_work); + struct bio *bio = bip->bip_bio; + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + int error; + + error = bio_integrity_process(bio, bi->verify_fn); + + /* Restore original bio completion handler */ + bio->bi_end_io = bip->bip_end_io; + bio_endio_nodec(bio, error); +} + +/** + * bio_integrity_endio - Integrity I/O completion function + * @bio: Protected bio + * @error: Pointer to errno + * + * Description: Completion for integrity I/O + * + * Normally I/O completion is done in interrupt context. However, + * verifying I/O integrity is a time-consuming task which must be run + * in process context. This function postpones completion + * accordingly. + */ +void bio_integrity_endio(struct bio *bio, int error) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + + BUG_ON(bip->bip_bio != bio); + + /* In case of an I/O error there is no point in verifying the + * integrity metadata. Restore original bio end_io handler + * and run it. 
+ */ + if (error) { + bio->bi_end_io = bip->bip_end_io; + bio_endio_nodec(bio, error); + + return; + } + + INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); + queue_work(kintegrityd_wq, &bip->bip_work); +} +EXPORT_SYMBOL(bio_integrity_endio); + +/** + * bio_integrity_advance - Advance integrity vector + * @bio: bio whose integrity vector to update + * @bytes_done: number of data bytes that have been completed + * + * Description: This function calculates how many integrity bytes the + * number of completed data bytes correspond to and advances the + * integrity vector accordingly. + */ +void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); + + bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); +} +EXPORT_SYMBOL(bio_integrity_advance); + +/** + * bio_integrity_trim - Trim integrity vector + * @bio: bio whose integrity vector to update + * @offset: offset to first data sector + * @sectors: number of data sectors + * + * Description: Used to trim the integrity vector in a cloned bio. + * The ivec will be advanced corresponding to 'offset' data sectors + * and the length will be truncated corresponding to 'len' data + * sectors. + */ +void bio_integrity_trim(struct bio *bio, unsigned int offset, + unsigned int sectors) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + + bio_integrity_advance(bio, offset << 9); + bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors); +} +EXPORT_SYMBOL(bio_integrity_trim); + +/** + * bio_integrity_clone - Callback for cloning bios with integrity metadata + * @bio: New bio + * @bio_src: Original bio + * @gfp_mask: Memory allocation mask + * + * Description: Called to allocate a bip when cloning a bio + */ +int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask) +{ + struct bio_integrity_payload *bip_src = bio_integrity(bio_src); + struct bio_integrity_payload *bip; + + BUG_ON(bip_src == NULL); + + bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); + + if (bip == NULL) + return -EIO; + + memcpy(bip->bip_vec, bip_src->bip_vec, + bip_src->bip_vcnt * sizeof(struct bio_vec)); + + bip->bip_vcnt = bip_src->bip_vcnt; + bip->bip_iter = bip_src->bip_iter; + + return 0; +} +EXPORT_SYMBOL(bio_integrity_clone); + +int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + if (bs->bio_integrity_pool) + return 0; + + bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); + if (!bs->bio_integrity_pool) + return -1; + + bs->bvec_integrity_pool = biovec_create_pool(pool_size); + if (!bs->bvec_integrity_pool) { + mempool_destroy(bs->bio_integrity_pool); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(bioset_integrity_create); + +void bioset_integrity_free(struct bio_set *bs) +{ + if (bs->bio_integrity_pool) + mempool_destroy(bs->bio_integrity_pool); + + if (bs->bvec_integrity_pool) + mempool_destroy(bs->bvec_integrity_pool); +} +EXPORT_SYMBOL(bioset_integrity_free); + +void __init bio_integrity_init(void) +{ + /* + * kintegrityd won't block much but may burn a lot of CPU cycles. + * Make it highpri CPU intensive wq with max concurrency of 1. 
+ */ + kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | + WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); + if (!kintegrityd_wq) + panic("Failed to create kintegrityd\n"); + + bip_slab = kmem_cache_create("bio_integrity_payload", + sizeof(struct bio_integrity_payload) + + sizeof(struct bio_vec) * BIP_INLINE_VECS, + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); +} diff --git a/block/bio.c b/block/bio.c new file mode 100644 index 000000000..4441522ca --- /dev/null +++ b/block/bio.c @@ -0,0 +1,2079 @@ +/* + * Copyright (C) 2001 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Test patch to inline a certain number of bi_io_vec's inside the bio + * itself, to shrink a bio data allocation from two mempool calls to one + */ +#define BIO_INLINE_VECS 4 + +/* + * if you change this list, also change bvec_alloc or things will + * break badly! cannot be bigger than what you can fit into an + * unsigned short + */ +#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } +static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { + BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), +}; +#undef BV + +/* + * fs_bio_set is the bio_set containing bio and iovec memory pools used by + * IO code that does not need private memory pools. 
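+ *
+ * Most code is not expected to reference it directly; the usual entry point
+ * is the bio_alloc() wrapper, which allocates from this set through
+ * bio_alloc_bioset(), e.g. (sketch, hypothetical caller):
+ *
+ *	struct bio *bio = bio_alloc(GFP_NOIO, 1);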
+ */ +struct bio_set *fs_bio_set; +EXPORT_SYMBOL(fs_bio_set); + +/* + * Our slab pool management + */ +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; +static DEFINE_MUTEX(bio_slab_lock); +static struct bio_slab *bio_slabs; +static unsigned int bio_slab_nr, bio_slab_max; + +static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) +{ + unsigned int sz = sizeof(struct bio) + extra_size; + struct kmem_cache *slab = NULL; + struct bio_slab *bslab, *new_bio_slabs; + unsigned int new_bio_slab_max; + unsigned int i, entry = -1; + + mutex_lock(&bio_slab_lock); + + i = 0; + while (i < bio_slab_nr) { + bslab = &bio_slabs[i]; + + if (!bslab->slab && entry == -1) + entry = i; + else if (bslab->slab_size == sz) { + slab = bslab->slab; + bslab->slab_ref++; + break; + } + i++; + } + + if (slab) + goto out_unlock; + + if (bio_slab_nr == bio_slab_max && entry == -1) { + new_bio_slab_max = bio_slab_max << 1; + new_bio_slabs = krealloc(bio_slabs, + new_bio_slab_max * sizeof(struct bio_slab), + GFP_KERNEL); + if (!new_bio_slabs) + goto out_unlock; + bio_slab_max = new_bio_slab_max; + bio_slabs = new_bio_slabs; + } + if (entry == -1) + entry = bio_slab_nr++; + + bslab = &bio_slabs[entry]; + + snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); + slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN, + SLAB_HWCACHE_ALIGN, NULL); + if (!slab) + goto out_unlock; + + bslab->slab = slab; + bslab->slab_ref = 1; + bslab->slab_size = sz; +out_unlock: + mutex_unlock(&bio_slab_lock); + return slab; +} + +static void bio_put_slab(struct bio_set *bs) +{ + struct bio_slab *bslab = NULL; + unsigned int i; + + mutex_lock(&bio_slab_lock); + + for (i = 0; i < bio_slab_nr; i++) { + if (bs->bio_slab == bio_slabs[i].slab) { + bslab = &bio_slabs[i]; + break; + } + } + + if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) + goto out; + + WARN_ON(!bslab->slab_ref); + + if (--bslab->slab_ref) + goto out; + + kmem_cache_destroy(bslab->slab); + bslab->slab = NULL; + +out: + mutex_unlock(&bio_slab_lock); +} + +unsigned int bvec_nr_vecs(unsigned short idx) +{ + return bvec_slabs[idx].nr_vecs; +} + +void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) +{ + BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); + + if (idx == BIOVEC_MAX_IDX) + mempool_free(bv, pool); + else { + struct biovec_slab *bvs = bvec_slabs + idx; + + kmem_cache_free(bvs->slab, bv); + } +} + +struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, + mempool_t *pool) +{ + struct bio_vec *bvl; + + /* + * see comment near bvec_array define! + */ + switch (nr) { + case 1: + *idx = 0; + break; + case 2 ... 4: + *idx = 1; + break; + case 5 ... 16: + *idx = 2; + break; + case 17 ... 64: + *idx = 3; + break; + case 65 ... 128: + *idx = 4; + break; + case 129 ... BIO_MAX_PAGES: + *idx = 5; + break; + default: + return NULL; + } + + /* + * idx now points to the pool we want to allocate from. only the + * 1-vec entry pool is mempool backed. + */ + if (*idx == BIOVEC_MAX_IDX) { +fallback: + bvl = mempool_alloc(pool, gfp_mask); + } else { + struct biovec_slab *bvs = bvec_slabs + *idx; + gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + + /* + * Make this allocation restricted and don't dump info on + * allocation failures, since we'll fallback to the mempool + * in case of failure. + */ + __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; + + /* + * Try a slab allocation. 
If this fails and __GFP_WAIT + * is set, retry with the 1-entry mempool + */ + bvl = kmem_cache_alloc(bvs->slab, __gfp_mask); + if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) { + *idx = BIOVEC_MAX_IDX; + goto fallback; + } + } + + return bvl; +} + +static void __bio_free(struct bio *bio) +{ + bio_disassociate_task(bio); + + if (bio_integrity(bio)) + bio_integrity_free(bio); +} + +static void bio_free(struct bio *bio) +{ + struct bio_set *bs = bio->bi_pool; + void *p; + + __bio_free(bio); + + if (bs) { + if (bio_flagged(bio, BIO_OWNS_VEC)) + bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); + + /* + * If we have front padding, adjust the bio pointer before freeing + */ + p = bio; + p -= bs->front_pad; + + mempool_free(p, bs->bio_pool); + } else { + /* Bio was allocated by bio_kmalloc() */ + kfree(bio); + } +} + +void bio_init(struct bio *bio) +{ + memset(bio, 0, sizeof(*bio)); + bio->bi_flags = 1 << BIO_UPTODATE; + atomic_set(&bio->bi_remaining, 1); + atomic_set(&bio->bi_cnt, 1); +} +EXPORT_SYMBOL(bio_init); + +/** + * bio_reset - reinitialize a bio + * @bio: bio to reset + * + * Description: + * After calling bio_reset(), @bio will be in the same state as a freshly + * allocated bio returned bio bio_alloc_bioset() - the only fields that are + * preserved are the ones that are initialized by bio_alloc_bioset(). See + * comment in struct bio. + */ +void bio_reset(struct bio *bio) +{ + unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); + + __bio_free(bio); + + memset(bio, 0, BIO_RESET_BYTES); + bio->bi_flags = flags|(1 << BIO_UPTODATE); + atomic_set(&bio->bi_remaining, 1); +} +EXPORT_SYMBOL(bio_reset); + +static void bio_chain_endio(struct bio *bio, int error) +{ + bio_endio(bio->bi_private, error); + bio_put(bio); +} + +/** + * bio_chain - chain bio completions + * @bio: the target bio + * @parent: the @bio's parent bio + * + * The caller won't have a bi_end_io called when @bio completes - instead, + * @parent's bi_end_io won't be called until both @parent and @bio have + * completed; the chained bio will also be freed when it completes. + * + * The caller must not set bi_private or bi_end_io in @bio. + */ +void bio_chain(struct bio *bio, struct bio *parent) +{ + BUG_ON(bio->bi_private || bio->bi_end_io); + + bio->bi_private = parent; + bio->bi_end_io = bio_chain_endio; + atomic_inc(&parent->bi_remaining); +} +EXPORT_SYMBOL(bio_chain); + +static void bio_alloc_rescue(struct work_struct *work) +{ + struct bio_set *bs = container_of(work, struct bio_set, rescue_work); + struct bio *bio; + + while (1) { + spin_lock(&bs->rescue_lock); + bio = bio_list_pop(&bs->rescue_list); + spin_unlock(&bs->rescue_lock); + + if (!bio) + break; + + generic_make_request(bio); + } +} + +static void punt_bios_to_rescuer(struct bio_set *bs) +{ + struct bio_list punt, nopunt; + struct bio *bio; + + /* + * In order to guarantee forward progress we must punt only bios that + * were allocated from this bio_set; otherwise, if there was a bio on + * there for a stacking driver higher up in the stack, processing it + * could require allocating bios from this bio_set, and doing that from + * our own rescuer would be bad. + * + * Since bio lists are singly linked, pop them all instead of trying to + * remove from the middle of the list: + */ + + bio_list_init(&punt); + bio_list_init(&nopunt); + + while ((bio = bio_list_pop(current->bio_list))) + bio_list_add(bio->bi_pool == bs ? 
&punt : &nopunt, bio); + + *current->bio_list = nopunt; + + spin_lock(&bs->rescue_lock); + bio_list_merge(&bs->rescue_list, &punt); + spin_unlock(&bs->rescue_lock); + + queue_work(bs->rescue_workqueue, &bs->rescue_work); +} + +/** + * bio_alloc_bioset - allocate a bio for I/O + * @gfp_mask: the GFP_ mask given to the slab allocator + * @nr_iovecs: number of iovecs to pre-allocate + * @bs: the bio_set to allocate from. + * + * Description: + * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is + * backed by the @bs's mempool. + * + * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be + * able to allocate a bio. This is due to the mempool guarantees. To make this + * work, callers must never allocate more than 1 bio at a time from this pool. + * Callers that need to allocate more than 1 bio must always submit the + * previously allocated bio for IO before attempting to allocate a new one. + * Failure to do so can cause deadlocks under memory pressure. + * + * Note that when running under generic_make_request() (i.e. any block + * driver), bios are not submitted until after you return - see the code in + * generic_make_request() that converts recursion into iteration, to prevent + * stack overflows. + * + * This would normally mean allocating multiple bios under + * generic_make_request() would be susceptible to deadlocks, but we have + * deadlock avoidance code that resubmits any blocked bios from a rescuer + * thread. + * + * However, we do not guarantee forward progress for allocations from other + * mempools. Doing multiple allocations from the same mempool under + * generic_make_request() should be avoided - instead, use bio_set's front_pad + * for per bio allocations. + * + * RETURNS: + * Pointer to new bio on success, NULL on failure. + */ +struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) +{ + gfp_t saved_gfp = gfp_mask; + unsigned front_pad; + unsigned inline_vecs; + unsigned long idx = BIO_POOL_NONE; + struct bio_vec *bvl = NULL; + struct bio *bio; + void *p; + + if (!bs) { + if (nr_iovecs > UIO_MAXIOV) + return NULL; + + p = kmalloc(sizeof(struct bio) + + nr_iovecs * sizeof(struct bio_vec), + gfp_mask); + front_pad = 0; + inline_vecs = nr_iovecs; + } else { + /* should not use nobvec bioset for nr_iovecs > 0 */ + if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0)) + return NULL; + /* + * generic_make_request() converts recursion to iteration; this + * means if we're running beneath it, any bios we allocate and + * submit will not be submitted (and thus freed) until after we + * return. + * + * This exposes us to a potential deadlock if we allocate + * multiple bios from the same bio_set() while running + * underneath generic_make_request(). If we were to allocate + * multiple bios (say a stacking block driver that was splitting + * bios), we would deadlock if we exhausted the mempool's + * reserve. + * + * We solve this, and guarantee forward progress, with a rescuer + * workqueue per bio_set. If we go to allocate and there are + * bios on current->bio_list, we first try the allocation + * without __GFP_WAIT; if that fails, we punt those bios we + * would be blocking to the rescuer workqueue before we retry + * with the original gfp_flags. 
+ */ + + if (current->bio_list && !bio_list_empty(current->bio_list)) + gfp_mask &= ~__GFP_WAIT; + + p = mempool_alloc(bs->bio_pool, gfp_mask); + if (!p && gfp_mask != saved_gfp) { + punt_bios_to_rescuer(bs); + gfp_mask = saved_gfp; + p = mempool_alloc(bs->bio_pool, gfp_mask); + } + + front_pad = bs->front_pad; + inline_vecs = BIO_INLINE_VECS; + } + + if (unlikely(!p)) + return NULL; + + bio = p + front_pad; + bio_init(bio); + + if (nr_iovecs > inline_vecs) { + bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); + if (!bvl && gfp_mask != saved_gfp) { + punt_bios_to_rescuer(bs); + gfp_mask = saved_gfp; + bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); + } + + if (unlikely(!bvl)) + goto err_free; + + bio->bi_flags |= 1 << BIO_OWNS_VEC; + } else if (nr_iovecs) { + bvl = bio->bi_inline_vecs; + } + + bio->bi_pool = bs; + bio->bi_flags |= idx << BIO_POOL_OFFSET; + bio->bi_max_vecs = nr_iovecs; + bio->bi_io_vec = bvl; + return bio; + +err_free: + mempool_free(p, bs->bio_pool); + return NULL; +} +EXPORT_SYMBOL(bio_alloc_bioset); + +void zero_fill_bio(struct bio *bio) +{ + unsigned long flags; + struct bio_vec bv; + struct bvec_iter iter; + + bio_for_each_segment(bv, bio, iter) { + char *data = bvec_kmap_irq(&bv, &flags); + memset(data, 0, bv.bv_len); + flush_dcache_page(bv.bv_page); + bvec_kunmap_irq(data, &flags); + } +} +EXPORT_SYMBOL(zero_fill_bio); + +/** + * bio_put - release a reference to a bio + * @bio: bio to release reference to + * + * Description: + * Put a reference to a &struct bio, either one you have gotten with + * bio_alloc, bio_get or bio_clone. The last put of a bio will free it. + **/ +void bio_put(struct bio *bio) +{ + BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); + + /* + * last put frees it + */ + if (atomic_dec_and_test(&bio->bi_cnt)) + bio_free(bio); +} +EXPORT_SYMBOL(bio_put); + +inline int bio_phys_segments(struct request_queue *q, struct bio *bio) +{ + if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) + blk_recount_segments(q, bio); + + return bio->bi_phys_segments; +} +EXPORT_SYMBOL(bio_phys_segments); + +/** + * __bio_clone_fast - clone a bio that shares the original bio's biovec + * @bio: destination bio + * @bio_src: bio to clone + * + * Clone a &bio. Caller will own the returned bio, but not + * the actual data it points to. Reference count of returned + * bio will be one. + * + * Caller must ensure that @bio_src is not freed before @bio. 
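+ *
+ * A minimal sketch of a typical fast-clone user (hypothetical stacking
+ * driver code, not part of this file), assuming my_bio_set is a
+ * driver-private bio_set and target_bdev the device the clone is
+ * redirected to; bio_clone_fast() below pairs this with the allocation:
+ *
+ *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bio_set);
+ *
+ *	if (clone) {
+ *		clone->bi_bdev = target_bdev;
+ *		generic_make_request(clone);
+ *	}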
+ */ +void __bio_clone_fast(struct bio *bio, struct bio *bio_src) +{ + BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); + + /* + * most users will be overriding ->bi_bdev with a new target, + * so we don't set nor calculate new physical/hw segment counts here + */ + bio->bi_bdev = bio_src->bi_bdev; + bio->bi_flags |= 1 << BIO_CLONED; + bio->bi_rw = bio_src->bi_rw; + bio->bi_iter = bio_src->bi_iter; + bio->bi_io_vec = bio_src->bi_io_vec; +} +EXPORT_SYMBOL(__bio_clone_fast); + +/** + * bio_clone_fast - clone a bio that shares the original bio's biovec + * @bio: bio to clone + * @gfp_mask: allocation priority + * @bs: bio_set to allocate from + * + * Like __bio_clone_fast, only also allocates the returned bio + */ +struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) +{ + struct bio *b; + + b = bio_alloc_bioset(gfp_mask, 0, bs); + if (!b) + return NULL; + + __bio_clone_fast(b, bio); + + if (bio_integrity(bio)) { + int ret; + + ret = bio_integrity_clone(b, bio, gfp_mask); + + if (ret < 0) { + bio_put(b); + return NULL; + } + } + + return b; +} +EXPORT_SYMBOL(bio_clone_fast); + +/** + * bio_clone_bioset - clone a bio + * @bio_src: bio to clone + * @gfp_mask: allocation priority + * @bs: bio_set to allocate from + * + * Clone bio. Caller will own the returned bio, but not the actual data it + * points to. Reference count of returned bio will be one. + */ +struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, + struct bio_set *bs) +{ + struct bvec_iter iter; + struct bio_vec bv; + struct bio *bio; + + /* + * Pre immutable biovecs, __bio_clone() used to just do a memcpy from + * bio_src->bi_io_vec to bio->bi_io_vec. + * + * We can't do that anymore, because: + * + * - The point of cloning the biovec is to produce a bio with a biovec + * the caller can modify: bi_idx and bi_bvec_done should be 0. + * + * - The original bio could've had more than BIO_MAX_PAGES biovecs; if + * we tried to clone the whole thing bio_alloc_bioset() would fail. + * But the clone should succeed as long as the number of biovecs we + * actually need to allocate is fewer than BIO_MAX_PAGES. + * + * - Lastly, bi_vcnt should not be looked at or relied upon by code + * that does not own the bio - reason being drivers don't use it for + * iterating over the biovec anymore, so expecting it to be kept up + * to date (i.e. for clones that share the parent biovec) is just + * asking for trouble and would force extra work on + * __bio_clone_fast() anyways. + */ + + bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); + if (!bio) + return NULL; + + bio->bi_bdev = bio_src->bi_bdev; + bio->bi_rw = bio_src->bi_rw; + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; + bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; + + if (bio->bi_rw & REQ_DISCARD) + goto integrity_clone; + + if (bio->bi_rw & REQ_WRITE_SAME) { + bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; + goto integrity_clone; + } + + bio_for_each_segment(bv, bio_src, iter) + bio->bi_io_vec[bio->bi_vcnt++] = bv; + +integrity_clone: + if (bio_integrity(bio_src)) { + int ret; + + ret = bio_integrity_clone(bio, bio_src, gfp_mask); + if (ret < 0) { + bio_put(bio); + return NULL; + } + } + + return bio; +} +EXPORT_SYMBOL(bio_clone_bioset); + +/** + * bio_get_nr_vecs - return approx number of vecs + * @bdev: I/O target + * + * Return the approximate number of pages we can send to this target. 
+ * There's no guarantee that you will be able to fit this number of pages + * into a bio, it does not account for dynamic restrictions that vary + * on offset. + */ +int bio_get_nr_vecs(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + int nr_pages; + + nr_pages = min_t(unsigned, + queue_max_segments(q), + queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); + + return min_t(unsigned, nr_pages, BIO_MAX_PAGES); + +} +EXPORT_SYMBOL(bio_get_nr_vecs); + +static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page + *page, unsigned int len, unsigned int offset, + unsigned int max_sectors) +{ + int retried_segments = 0; + struct bio_vec *bvec; + + /* + * cloned bio must not modify vec list + */ + if (unlikely(bio_flagged(bio, BIO_CLONED))) + return 0; + + if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) + return 0; + + /* + * For filesystems with a blocksize smaller than the pagesize + * we will often be called with the same page as last time and + * a consecutive offset. Optimize this special case. + */ + if (bio->bi_vcnt > 0) { + struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; + + if (page == prev->bv_page && + offset == prev->bv_offset + prev->bv_len) { + unsigned int prev_bv_len = prev->bv_len; + prev->bv_len += len; + + if (q->merge_bvec_fn) { + struct bvec_merge_data bvm = { + /* prev_bvec is already charged in + bi_size, discharge it in order to + simulate merging updated prev_bvec + as new bvec. */ + .bi_bdev = bio->bi_bdev, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = bio->bi_iter.bi_size - + prev_bv_len, + .bi_rw = bio->bi_rw, + }; + + if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { + prev->bv_len -= len; + return 0; + } + } + + bio->bi_iter.bi_size += len; + goto done; + } + + /* + * If the queue doesn't support SG gaps and adding this + * offset would create a gap, disallow it. + */ + if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && + bvec_gap_to_prev(prev, offset)) + return 0; + } + + if (bio->bi_vcnt >= bio->bi_max_vecs) + return 0; + + /* + * setup the new entry, we might clear it again later if we + * cannot add the page + */ + bvec = &bio->bi_io_vec[bio->bi_vcnt]; + bvec->bv_page = page; + bvec->bv_len = len; + bvec->bv_offset = offset; + bio->bi_vcnt++; + bio->bi_phys_segments++; + bio->bi_iter.bi_size += len; + + /* + * Perform a recount if the number of segments is greater + * than queue_max_segments(q). 
+ */ + + while (bio->bi_phys_segments > queue_max_segments(q)) { + + if (retried_segments) + goto failed; + + retried_segments = 1; + blk_recount_segments(q, bio); + } + + /* + * if queue has other restrictions (eg varying max sector size + * depending on offset), it can specify a merge_bvec_fn in the + * queue to get further control + */ + if (q->merge_bvec_fn) { + struct bvec_merge_data bvm = { + .bi_bdev = bio->bi_bdev, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = bio->bi_iter.bi_size - len, + .bi_rw = bio->bi_rw, + }; + + /* + * merge_bvec_fn() returns number of bytes it can accept + * at this offset + */ + if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) + goto failed; + } + + /* If we may be able to merge these biovecs, force a recount */ + if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) + bio->bi_flags &= ~(1 << BIO_SEG_VALID); + + done: + return len; + + failed: + bvec->bv_page = NULL; + bvec->bv_len = 0; + bvec->bv_offset = 0; + bio->bi_vcnt--; + bio->bi_iter.bi_size -= len; + blk_recount_segments(q, bio); + return 0; +} + +/** + * bio_add_pc_page - attempt to add page to bio + * @q: the target queue + * @bio: destination bio + * @page: page to add + * @len: vec entry length + * @offset: vec entry offset + * + * Attempt to add a page to the bio_vec maplist. This can fail for a + * number of reasons, such as the bio being full or target block device + * limitations. The target block device must allow bio's up to PAGE_SIZE, + * so it is always possible to add a single page to an empty bio. + * + * This should only be used by REQ_PC bios. + */ +int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + return __bio_add_page(q, bio, page, len, offset, + queue_max_hw_sectors(q)); +} +EXPORT_SYMBOL(bio_add_pc_page); + +/** + * bio_add_page - attempt to add page to bio + * @bio: destination bio + * @page: page to add + * @len: vec entry length + * @offset: vec entry offset + * + * Attempt to add a page to the bio_vec maplist. This can fail for a + * number of reasons, such as the bio being full or target block device + * limitations. The target block device must allow bio's up to PAGE_SIZE, + * so it is always possible to add a single page to an empty bio. + */ +int bio_add_page(struct bio *bio, struct page *page, unsigned int len, + unsigned int offset) +{ + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + unsigned int max_sectors; + + max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); + if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size) + max_sectors = len >> 9; + + return __bio_add_page(q, bio, page, len, offset, max_sectors); +} +EXPORT_SYMBOL(bio_add_page); + +struct submit_bio_ret { + struct completion event; + int error; +}; + +static void submit_bio_wait_endio(struct bio *bio, int error) +{ + struct submit_bio_ret *ret = bio->bi_private; + + ret->error = error; + complete(&ret->event); +} + +/** + * submit_bio_wait - submit a bio, and wait until it completes + * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) + * @bio: The &struct bio which describes the I/O + * + * Simple wrapper around submit_bio(). Returns 0 on success, or the error from + * bio_endio() on failure. 
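+ *
+ * A minimal usage sketch (hypothetical caller, not part of this file),
+ * assuming bdev, sector and page describe the I/O to perform:
+ *
+ *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
+ *	int err;
+ *
+ *	bio->bi_bdev = bdev;
+ *	bio->bi_iter.bi_sector = sector;
+ *	bio_add_page(bio, page, PAGE_SIZE, 0);
+ *	err = submit_bio_wait(READ, bio);
+ *	bio_put(bio);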
+ */ +int submit_bio_wait(int rw, struct bio *bio) +{ + struct submit_bio_ret ret; + + rw |= REQ_SYNC; + init_completion(&ret.event); + bio->bi_private = &ret; + bio->bi_end_io = submit_bio_wait_endio; + submit_bio(rw, bio); + wait_for_completion(&ret.event); + + return ret.error; +} +EXPORT_SYMBOL(submit_bio_wait); + +/** + * bio_advance - increment/complete a bio by some number of bytes + * @bio: bio to advance + * @bytes: number of bytes to complete + * + * This updates bi_sector, bi_size and bi_idx; if the number of bytes to + * complete doesn't align with a bvec boundary, then bv_len and bv_offset will + * be updated on the last bvec as well. + * + * @bio will then represent the remaining, uncompleted portion of the io. + */ +void bio_advance(struct bio *bio, unsigned bytes) +{ + if (bio_integrity(bio)) + bio_integrity_advance(bio, bytes); + + bio_advance_iter(bio, &bio->bi_iter, bytes); +} +EXPORT_SYMBOL(bio_advance); + +/** + * bio_alloc_pages - allocates a single page for each bvec in a bio + * @bio: bio to allocate pages for + * @gfp_mask: flags for allocation + * + * Allocates pages up to @bio->bi_vcnt. + * + * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are + * freed. + */ +int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) +{ + int i; + struct bio_vec *bv; + + bio_for_each_segment_all(bv, bio, i) { + bv->bv_page = alloc_page(gfp_mask); + if (!bv->bv_page) { + while (--bv >= bio->bi_io_vec) + __free_page(bv->bv_page); + return -ENOMEM; + } + } + + return 0; +} +EXPORT_SYMBOL(bio_alloc_pages); + +/** + * bio_copy_data - copy contents of data buffers from one chain of bios to + * another + * @src: source bio list + * @dst: destination bio list + * + * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats + * @src and @dst as linked lists of bios. + * + * Stops when it reaches the end of either @src or @dst - that is, copies + * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). + */ +void bio_copy_data(struct bio *dst, struct bio *src) +{ + struct bvec_iter src_iter, dst_iter; + struct bio_vec src_bv, dst_bv; + void *src_p, *dst_p; + unsigned bytes; + + src_iter = src->bi_iter; + dst_iter = dst->bi_iter; + + while (1) { + if (!src_iter.bi_size) { + src = src->bi_next; + if (!src) + break; + + src_iter = src->bi_iter; + } + + if (!dst_iter.bi_size) { + dst = dst->bi_next; + if (!dst) + break; + + dst_iter = dst->bi_iter; + } + + src_bv = bio_iter_iovec(src, src_iter); + dst_bv = bio_iter_iovec(dst, dst_iter); + + bytes = min(src_bv.bv_len, dst_bv.bv_len); + + src_p = kmap_atomic(src_bv.bv_page); + dst_p = kmap_atomic(dst_bv.bv_page); + + memcpy(dst_p + dst_bv.bv_offset, + src_p + src_bv.bv_offset, + bytes); + + kunmap_atomic(dst_p); + kunmap_atomic(src_p); + + bio_advance_iter(src, &src_iter, bytes); + bio_advance_iter(dst, &dst_iter, bytes); + } +} +EXPORT_SYMBOL(bio_copy_data); + +struct bio_map_data { + int is_our_pages; + struct iov_iter iter; + struct iovec iov[]; +}; + +static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, + gfp_t gfp_mask) +{ + if (iov_count > UIO_MAXIOV) + return NULL; + + return kmalloc(sizeof(struct bio_map_data) + + sizeof(struct iovec) * iov_count, gfp_mask); +} + +/** + * bio_copy_from_iter - copy all pages from iov_iter to bio + * @bio: The &struct bio which describes the I/O as destination + * @iter: iov_iter as source + * + * Copy all pages from iov_iter to bio. + * Returns 0 on success, or error on failure. 
+ */ +static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter) +{ + int i; + struct bio_vec *bvec; + + bio_for_each_segment_all(bvec, bio, i) { + ssize_t ret; + + ret = copy_page_from_iter(bvec->bv_page, + bvec->bv_offset, + bvec->bv_len, + &iter); + + if (!iov_iter_count(&iter)) + break; + + if (ret < bvec->bv_len) + return -EFAULT; + } + + return 0; +} + +/** + * bio_copy_to_iter - copy all pages from bio to iov_iter + * @bio: The &struct bio which describes the I/O as source + * @iter: iov_iter as destination + * + * Copy all pages from bio to iov_iter. + * Returns 0 on success, or error on failure. + */ +static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) +{ + int i; + struct bio_vec *bvec; + + bio_for_each_segment_all(bvec, bio, i) { + ssize_t ret; + + ret = copy_page_to_iter(bvec->bv_page, + bvec->bv_offset, + bvec->bv_len, + &iter); + + if (!iov_iter_count(&iter)) + break; + + if (ret < bvec->bv_len) + return -EFAULT; + } + + return 0; +} + +static void bio_free_pages(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + bio_for_each_segment_all(bvec, bio, i) + __free_page(bvec->bv_page); +} + +/** + * bio_uncopy_user - finish previously mapped bio + * @bio: bio being terminated + * + * Free pages allocated from bio_copy_user_iov() and write back data + * to user space in case of a read. + */ +int bio_uncopy_user(struct bio *bio) +{ + struct bio_map_data *bmd = bio->bi_private; + int ret = 0; + + if (!bio_flagged(bio, BIO_NULL_MAPPED)) { + /* + * if we're in a workqueue, the request is orphaned, so + * don't copy into a random user address space, just free. + */ + if (current->mm && bio_data_dir(bio) == READ) + ret = bio_copy_to_iter(bio, bmd->iter); + if (bmd->is_our_pages) + bio_free_pages(bio); + } + kfree(bmd); + bio_put(bio); + return ret; +} +EXPORT_SYMBOL(bio_uncopy_user); + +/** + * bio_copy_user_iov - copy user data to bio + * @q: destination block queue + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @iter: iovec iterator + * @gfp_mask: memory allocation flags + * + * Prepares and returns a bio for indirect user io, bouncing data + * to/from kernel pages as necessary. Must be paired with + * call bio_uncopy_user() on io completion. + */ +struct bio *bio_copy_user_iov(struct request_queue *q, + struct rq_map_data *map_data, + const struct iov_iter *iter, + gfp_t gfp_mask) +{ + struct bio_map_data *bmd; + struct page *page; + struct bio *bio; + int i, ret; + int nr_pages = 0; + unsigned int len = iter->count; + unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0; + + for (i = 0; i < iter->nr_segs; i++) { + unsigned long uaddr; + unsigned long end; + unsigned long start; + + uaddr = (unsigned long) iter->iov[i].iov_base; + end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1) + >> PAGE_SHIFT; + start = uaddr >> PAGE_SHIFT; + + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; + } + + if (offset) + nr_pages++; + + bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask); + if (!bmd) + return ERR_PTR(-ENOMEM); + + /* + * We need to do a deep copy of the iov_iter including the iovecs. + * The caller provided iov might point to an on-stack or otherwise + * shortlived one. + */ + bmd->is_our_pages = map_data ? 
0 : 1; + memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); + iov_iter_init(&bmd->iter, iter->type, bmd->iov, + iter->nr_segs, iter->count); + + ret = -ENOMEM; + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + goto out_bmd; + + if (iter->type & WRITE) + bio->bi_rw |= REQ_WRITE; + + ret = 0; + + if (map_data) { + nr_pages = 1 << map_data->page_order; + i = map_data->offset / PAGE_SIZE; + } + while (len) { + unsigned int bytes = PAGE_SIZE; + + bytes -= offset; + + if (bytes > len) + bytes = len; + + if (map_data) { + if (i == map_data->nr_entries * nr_pages) { + ret = -ENOMEM; + break; + } + + page = map_data->pages[i / nr_pages]; + page += (i % nr_pages); + + i++; + } else { + page = alloc_page(q->bounce_gfp | gfp_mask); + if (!page) { + ret = -ENOMEM; + break; + } + } + + if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) + break; + + len -= bytes; + offset = 0; + } + + if (ret) + goto cleanup; + + /* + * success + */ + if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) || + (map_data && map_data->from_user)) { + ret = bio_copy_from_iter(bio, *iter); + if (ret) + goto cleanup; + } + + bio->bi_private = bmd; + return bio; +cleanup: + if (!map_data) + bio_free_pages(bio); + bio_put(bio); +out_bmd: + kfree(bmd); + return ERR_PTR(ret); +} + +/** + * bio_map_user_iov - map user iovec into bio + * @q: the struct request_queue for the bio + * @iter: iovec iterator + * @gfp_mask: memory allocation flags + * + * Map the user space address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. + */ +struct bio *bio_map_user_iov(struct request_queue *q, + const struct iov_iter *iter, + gfp_t gfp_mask) +{ + int j; + int nr_pages = 0; + struct page **pages; + struct bio *bio; + int cur_page = 0; + int ret, offset; + struct iov_iter i; + struct iovec iov; + + iov_for_each(iov, i, *iter) { + unsigned long uaddr = (unsigned long) iov.iov_base; + unsigned long len = iov.iov_len; + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = uaddr >> PAGE_SHIFT; + + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; + /* + * buffer must be aligned to at least hardsector size for now + */ + if (uaddr & queue_dma_alignment(q)) + return ERR_PTR(-EINVAL); + } + + if (!nr_pages) + return ERR_PTR(-EINVAL); + + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + return ERR_PTR(-ENOMEM); + + ret = -ENOMEM; + pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask); + if (!pages) + goto out; + + iov_for_each(iov, i, *iter) { + unsigned long uaddr = (unsigned long) iov.iov_base; + unsigned long len = iov.iov_len; + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = uaddr >> PAGE_SHIFT; + const int local_nr_pages = end - start; + const int page_limit = cur_page + local_nr_pages; + + ret = get_user_pages_fast(uaddr, local_nr_pages, + (iter->type & WRITE) != WRITE, + &pages[cur_page]); + if (ret < local_nr_pages) { + ret = -EFAULT; + goto out_unmap; + } + + offset = uaddr & ~PAGE_MASK; + for (j = cur_page; j < page_limit; j++) { + unsigned int bytes = PAGE_SIZE - offset; + + if (len <= 0) + break; + + if (bytes > len) + bytes = len; + + /* + * sorry... 
+ */ + if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < + bytes) + break; + + len -= bytes; + offset = 0; + } + + cur_page = j; + /* + * release the pages we didn't map into the bio, if any + */ + while (j < page_limit) + page_cache_release(pages[j++]); + } + + kfree(pages); + + /* + * set data direction, and check if mapped pages need bouncing + */ + if (iter->type & WRITE) + bio->bi_rw |= REQ_WRITE; + + bio->bi_flags |= (1 << BIO_USER_MAPPED); + + /* + * subtle -- if __bio_map_user() ended up bouncing a bio, + * it would normally disappear when its bi_end_io is run. + * however, we need it for the unmap, so grab an extra + * reference to it + */ + bio_get(bio); + return bio; + + out_unmap: + for (j = 0; j < nr_pages; j++) { + if (!pages[j]) + break; + page_cache_release(pages[j]); + } + out: + kfree(pages); + bio_put(bio); + return ERR_PTR(ret); +} + +static void __bio_unmap_user(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + /* + * make sure we dirty pages we wrote to + */ + bio_for_each_segment_all(bvec, bio, i) { + if (bio_data_dir(bio) == READ) + set_page_dirty_lock(bvec->bv_page); + + page_cache_release(bvec->bv_page); + } + + bio_put(bio); +} + +/** + * bio_unmap_user - unmap a bio + * @bio: the bio being unmapped + * + * Unmap a bio previously mapped by bio_map_user(). Must be called with + * a process context. + * + * bio_unmap_user() may sleep. + */ +void bio_unmap_user(struct bio *bio) +{ + __bio_unmap_user(bio); + bio_put(bio); +} +EXPORT_SYMBOL(bio_unmap_user); + +static void bio_map_kern_endio(struct bio *bio, int err) +{ + bio_put(bio); +} + +/** + * bio_map_kern - map kernel address into bio + * @q: the struct request_queue for the bio + * @data: pointer to buffer to map + * @len: length in bytes + * @gfp_mask: allocation flags for bio allocation + * + * Map the kernel address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. 
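+ *
+ * A minimal usage sketch (hypothetical caller, not part of this file),
+ * assuming buf/buf_len describe a kernel buffer to transfer to or from
+ * the queue q:
+ *
+ *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
+ *
+ *	if (IS_ERR(bio))
+ *		return PTR_ERR(bio);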
+ */ +struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, + gfp_t gfp_mask) +{ + unsigned long kaddr = (unsigned long)data; + unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = kaddr >> PAGE_SHIFT; + const int nr_pages = end - start; + int offset, i; + struct bio *bio; + + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + return ERR_PTR(-ENOMEM); + + offset = offset_in_page(kaddr); + for (i = 0; i < nr_pages; i++) { + unsigned int bytes = PAGE_SIZE - offset; + + if (len <= 0) + break; + + if (bytes > len) + bytes = len; + + if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, + offset) < bytes) { + /* we don't support partial mappings */ + bio_put(bio); + return ERR_PTR(-EINVAL); + } + + data += bytes; + len -= bytes; + offset = 0; + } + + bio->bi_end_io = bio_map_kern_endio; + return bio; +} +EXPORT_SYMBOL(bio_map_kern); + +static void bio_copy_kern_endio(struct bio *bio, int err) +{ + bio_free_pages(bio); + bio_put(bio); +} + +static void bio_copy_kern_endio_read(struct bio *bio, int err) +{ + char *p = bio->bi_private; + struct bio_vec *bvec; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + memcpy(p, page_address(bvec->bv_page), bvec->bv_len); + p += bvec->bv_len; + } + + bio_copy_kern_endio(bio, err); +} + +/** + * bio_copy_kern - copy kernel address into bio + * @q: the struct request_queue for the bio + * @data: pointer to buffer to copy + * @len: length in bytes + * @gfp_mask: allocation flags for bio and page allocation + * @reading: data direction is READ + * + * copy the kernel address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. + */ +struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, + gfp_t gfp_mask, int reading) +{ + unsigned long kaddr = (unsigned long)data; + unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = kaddr >> PAGE_SHIFT; + struct bio *bio; + void *p = data; + int nr_pages = 0; + + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + + nr_pages = end - start; + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + return ERR_PTR(-ENOMEM); + + while (len) { + struct page *page; + unsigned int bytes = PAGE_SIZE; + + if (bytes > len) + bytes = len; + + page = alloc_page(q->bounce_gfp | gfp_mask); + if (!page) + goto cleanup; + + if (!reading) + memcpy(page_address(page), p, bytes); + + if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) + break; + + len -= bytes; + p += bytes; + } + + if (reading) { + bio->bi_end_io = bio_copy_kern_endio_read; + bio->bi_private = data; + } else { + bio->bi_end_io = bio_copy_kern_endio; + bio->bi_rw |= REQ_WRITE; + } + + return bio; + +cleanup: + bio_free_pages(bio); + bio_put(bio); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(bio_copy_kern); + +/* + * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions + * for performing direct-IO in BIOs. + * + * The problem is that we cannot run set_page_dirty() from interrupt context + * because the required locks are not interrupt-safe. So what we can do is to + * mark the pages dirty _before_ performing IO. And in interrupt context, + * check that the pages are still dirty. If so, fine. If not, redirty them + * in process context. + * + * We special-case compound pages here: normally this means reads into hugetlb + * pages. The logic in here doesn't really work right for compound pages + * because the VM does not uniformly chase down the head page in all cases. 
+ * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't + * handle them at all. So we skip compound pages here at an early stage. + * + * Note that this code is very hard to test under normal circumstances because + * direct-io pins the pages with get_user_pages(). This makes + * is_page_cache_freeable return false, and the VM will not clean the pages. + * But other code (eg, flusher threads) could clean the pages if they are mapped + * pagecache. + * + * Simply disabling the call to bio_set_pages_dirty() is a good way to test the + * deferred bio dirtying paths. + */ + +/* + * bio_set_pages_dirty() will mark all the bio's pages as dirty. + */ +void bio_set_pages_dirty(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + struct page *page = bvec->bv_page; + + if (page && !PageCompound(page)) + set_page_dirty_lock(page); + } +} + +static void bio_release_pages(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + struct page *page = bvec->bv_page; + + if (page) + put_page(page); + } +} + +/* + * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. + * If they are, then fine. If, however, some pages are clean then they must + * have been written out during the direct-IO read. So we take another ref on + * the BIO and the offending pages and re-dirty the pages in process context. + * + * It is expected that bio_check_pages_dirty() will wholly own the BIO from + * here on. It will run one page_cache_release() against each page and will + * run one bio_put() against the BIO. + */ + +static void bio_dirty_fn(struct work_struct *work); + +static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); +static DEFINE_SPINLOCK(bio_dirty_lock); +static struct bio *bio_dirty_list; + +/* + * This runs in process context + */ +static void bio_dirty_fn(struct work_struct *work) +{ + unsigned long flags; + struct bio *bio; + + spin_lock_irqsave(&bio_dirty_lock, flags); + bio = bio_dirty_list; + bio_dirty_list = NULL; + spin_unlock_irqrestore(&bio_dirty_lock, flags); + + while (bio) { + struct bio *next = bio->bi_private; + + bio_set_pages_dirty(bio); + bio_release_pages(bio); + bio_put(bio); + bio = next; + } +} + +void bio_check_pages_dirty(struct bio *bio) +{ + struct bio_vec *bvec; + int nr_clean_pages = 0; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + struct page *page = bvec->bv_page; + + if (PageDirty(page) || PageCompound(page)) { + page_cache_release(page); + bvec->bv_page = NULL; + } else { + nr_clean_pages++; + } + } + + if (nr_clean_pages) { + unsigned long flags; + + spin_lock_irqsave(&bio_dirty_lock, flags); + bio->bi_private = bio_dirty_list; + bio_dirty_list = bio; + spin_unlock_irqrestore(&bio_dirty_lock, flags); + schedule_work(&bio_dirty_work); + } else { + bio_put(bio); + } +} + +void generic_start_io_acct(int rw, unsigned long sectors, + struct hd_struct *part) +{ + int cpu = part_stat_lock(); + + part_round_stats(cpu, part); + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, sectors[rw], sectors); + part_inc_in_flight(part, rw); + + part_stat_unlock(); +} +EXPORT_SYMBOL(generic_start_io_acct); + +void generic_end_io_acct(int rw, struct hd_struct *part, + unsigned long start_time) +{ + unsigned long duration = jiffies - start_time; + int cpu = part_stat_lock(); + + part_stat_add(cpu, part, ticks[rw], duration); + part_round_stats(cpu, part); + part_dec_in_flight(part, rw); + + part_stat_unlock(); +} +EXPORT_SYMBOL(generic_end_io_acct); 
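+
+/*
+ * Illustrative sketch of how a bio-based driver is expected to pair the two
+ * accounting helpers above (not a caller in this file; "my_dev" and the
+ * split between submission and completion paths are placeholders):
+ *
+ *	unsigned long start_time = jiffies;
+ *	int rw = bio_data_dir(bio);
+ *
+ *	generic_start_io_acct(rw, bio_sectors(bio), &my_dev->disk->part0);
+ *	... carry out the transfer, then from the completion path:
+ *	generic_end_io_acct(rw, &my_dev->disk->part0, start_time);
+ */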
+ +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +void bio_flush_dcache_pages(struct bio *bi) +{ + struct bio_vec bvec; + struct bvec_iter iter; + + bio_for_each_segment(bvec, bi, iter) + flush_dcache_page(bvec.bv_page); +} +EXPORT_SYMBOL(bio_flush_dcache_pages); +#endif + +/** + * bio_endio - end I/O on a bio + * @bio: bio + * @error: error, if any + * + * Description: + * bio_endio() will end I/O on the whole bio. bio_endio() is the + * preferred way to end I/O on a bio, it takes care of clearing + * BIO_UPTODATE on error. @error is 0 on success, and one of the + * established -Exxxx (-EIO, for instance) error values in case + * something went wrong. No one should call bi_end_io() directly on a + * bio unless they own it and thus know that it has an end_io + * function. + **/ +void bio_endio(struct bio *bio, int error) +{ + while (bio) { + BUG_ON(atomic_read(&bio->bi_remaining) <= 0); + + if (error) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + error = -EIO; + + if (!atomic_dec_and_test(&bio->bi_remaining)) + return; + + /* + * Need to have a real endio function for chained bios, + * otherwise various corner cases will break (like stacking + * block devices that save/restore bi_end_io) - however, we want + * to avoid unbounded recursion and blowing the stack. Tail call + * optimization would handle this, but compiling with frame + * pointers also disables gcc's sibling call optimization. + */ + if (bio->bi_end_io == bio_chain_endio) { + struct bio *parent = bio->bi_private; + bio_put(bio); + bio = parent; + } else { + if (bio->bi_end_io) + bio->bi_end_io(bio, error); + bio = NULL; + } + } +} +EXPORT_SYMBOL(bio_endio); + +/** + * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining + * @bio: bio + * @error: error, if any + * + * For code that has saved and restored bi_end_io; think hard before using this + * function; you probably should have cloned the entire bio. + **/ +void bio_endio_nodec(struct bio *bio, int error) +{ + atomic_inc(&bio->bi_remaining); + bio_endio(bio, error); +} +EXPORT_SYMBOL(bio_endio_nodec); + +/** + * bio_split - split a bio + * @bio: bio to split + * @sectors: number of sectors to split from the front of @bio + * @gfp: gfp mask + * @bs: bio set to allocate from + * + * Allocates and returns a new bio which represents @sectors from the start of + * @bio, and updates @bio to represent the remaining sectors. + * + * Unless this is a discard request the newly allocated bio will point + * to @bio's bi_io_vec; it is the caller's responsibility to ensure that + * @bio is not freed before the split. + */ +struct bio *bio_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs) +{ + struct bio *split = NULL; + + BUG_ON(sectors <= 0); + BUG_ON(sectors >= bio_sectors(bio)); + + /* + * Discards need a mutable bio_vec to accommodate the payload + * required by the DSM TRIM and UNMAP commands.
+ */ + if (bio->bi_rw & REQ_DISCARD) + split = bio_clone_bioset(bio, gfp, bs); + else + split = bio_clone_fast(bio, gfp, bs); + + if (!split) + return NULL; + + split->bi_iter.bi_size = sectors << 9; + + if (bio_integrity(split)) + bio_integrity_trim(split, 0, sectors); + + bio_advance(bio, split->bi_iter.bi_size); + + return split; +} +EXPORT_SYMBOL(bio_split); + +/** + * bio_trim - trim a bio + * @bio: bio to trim + * @offset: number of sectors to trim from the front of @bio + * @size: size we want to trim @bio to, in sectors + */ +void bio_trim(struct bio *bio, int offset, int size) +{ + /* 'bio' is a cloned bio which we need to trim to match + * the given offset and size. + */ + + size <<= 9; + if (offset == 0 && size == bio->bi_iter.bi_size) + return; + + clear_bit(BIO_SEG_VALID, &bio->bi_flags); + + bio_advance(bio, offset << 9); + + bio->bi_iter.bi_size = size; +} +EXPORT_SYMBOL_GPL(bio_trim); + +/* + * create memory pools for biovec's in a bio_set. + * use the global biovec slabs created for general use. + */ +mempool_t *biovec_create_pool(int pool_entries) +{ + struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; + + return mempool_create_slab_pool(pool_entries, bp->slab); +} + +void bioset_free(struct bio_set *bs) +{ + if (bs->rescue_workqueue) + destroy_workqueue(bs->rescue_workqueue); + + if (bs->bio_pool) + mempool_destroy(bs->bio_pool); + + if (bs->bvec_pool) + mempool_destroy(bs->bvec_pool); + + bioset_integrity_free(bs); + bio_put_slab(bs); + + kfree(bs); +} +EXPORT_SYMBOL(bioset_free); + +static struct bio_set *__bioset_create(unsigned int pool_size, + unsigned int front_pad, + bool create_bvec_pool) +{ + unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); + struct bio_set *bs; + + bs = kzalloc(sizeof(*bs), GFP_KERNEL); + if (!bs) + return NULL; + + bs->front_pad = front_pad; + + spin_lock_init(&bs->rescue_lock); + bio_list_init(&bs->rescue_list); + INIT_WORK(&bs->rescue_work, bio_alloc_rescue); + + bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); + if (!bs->bio_slab) { + kfree(bs); + return NULL; + } + + bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab); + if (!bs->bio_pool) + goto bad; + + if (create_bvec_pool) { + bs->bvec_pool = biovec_create_pool(pool_size); + if (!bs->bvec_pool) + goto bad; + } + + bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); + if (!bs->rescue_workqueue) + goto bad; + + return bs; +bad: + bioset_free(bs); + return NULL; +} + +/** + * bioset_create - Create a bio_set + * @pool_size: Number of bio and bio_vecs to cache in the mempool + * @front_pad: Number of bytes to allocate in front of the returned bio + * + * Description: + * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller + * to ask for a number of bytes to be allocated in front of the bio. + * Front pad allocation is useful for embedding the bio inside + * another structure, to avoid allocating extra data to go with the bio. + * Note that the bio must be embedded at the END of that structure always, + * or things will break badly. 
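+ *
+ * A minimal illustrative sketch (the names are placeholders, not from this
+ * file); the bio sits at the end of the wrapping structure and @front_pad
+ * is its offset:
+ *
+ *	struct my_io {
+ *		struct my_dev *dev;
+ *		struct bio bio;
+ *	};
+ *	static struct bio_set *bs;
+ *
+ *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
+ *	if (!bs)
+ *		return -ENOMEM;
+ *
+ * Bios allocated from @bs with bio_alloc_bioset() can then be converted
+ * back to the containing struct my_io with container_of().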
+ */ +struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) +{ + return __bioset_create(pool_size, front_pad, true); +} +EXPORT_SYMBOL(bioset_create); + +/** + * bioset_create_nobvec - Create a bio_set without bio_vec mempool + * @pool_size: Number of bio to cache in the mempool + * @front_pad: Number of bytes to allocate in front of the returned bio + * + * Description: + * Same functionality as bioset_create() except that mempool is not + * created for bio_vecs. Saving some memory for bio_clone_fast() users. + */ +struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad) +{ + return __bioset_create(pool_size, front_pad, false); +} +EXPORT_SYMBOL(bioset_create_nobvec); + +#ifdef CONFIG_BLK_CGROUP +/** + * bio_associate_current - associate a bio with %current + * @bio: target bio + * + * Associate @bio with %current if it hasn't been associated yet. Block + * layer will treat @bio as if it were issued by %current no matter which + * task actually issues it. + * + * This function takes an extra reference of @task's io_context and blkcg + * which will be put when @bio is released. The caller must own @bio, + * ensure %current->io_context exists, and is responsible for synchronizing + * calls to this function. + */ +int bio_associate_current(struct bio *bio) +{ + struct io_context *ioc; + struct cgroup_subsys_state *css; + + if (bio->bi_ioc) + return -EBUSY; + + ioc = current->io_context; + if (!ioc) + return -ENOENT; + + /* acquire active ref on @ioc and associate */ + get_io_context_active(ioc); + bio->bi_ioc = ioc; + + /* associate blkcg if exists */ + rcu_read_lock(); + css = task_css(current, blkio_cgrp_id); + if (css && css_tryget_online(css)) + bio->bi_css = css; + rcu_read_unlock(); + + return 0; +} + +/** + * bio_disassociate_task - undo bio_associate_current() + * @bio: target bio + */ +void bio_disassociate_task(struct bio *bio) +{ + if (bio->bi_ioc) { + put_io_context(bio->bi_ioc); + bio->bi_ioc = NULL; + } + if (bio->bi_css) { + css_put(bio->bi_css); + bio->bi_css = NULL; + } +} + +#endif /* CONFIG_BLK_CGROUP */ + +static void __init biovec_init_slabs(void) +{ + int i; + + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + int size; + struct biovec_slab *bvs = bvec_slabs + i; + + if (bvs->nr_vecs <= BIO_INLINE_VECS) { + bvs->slab = NULL; + continue; + } + + size = bvs->nr_vecs * sizeof(struct bio_vec); + bvs->slab = kmem_cache_create(bvs->name, size, 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + } +} + +static int __init init_bio(void) +{ + bio_slab_max = 2; + bio_slab_nr = 0; + bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); + if (!bio_slabs) + panic("bio: can't allocate bios\n"); + + bio_integrity_init(); + biovec_init_slabs(); + + fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); + if (!fs_bio_set) + panic("bio: can't allocate bios\n"); + + if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE)) + panic("bio: can't create integrity pool\n"); + + return 0; +} +subsys_initcall(init_bio); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c new file mode 100644 index 000000000..0ac817b75 --- /dev/null +++ b/block/blk-cgroup.c @@ -0,0 +1,1160 @@ +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2009 Vivek Goyal + * Nauman Rafique + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "blk-cgroup.h" 
+#include "blk.h" + +#define MAX_KEY_LEN 100 + +static DEFINE_MUTEX(blkcg_pol_mutex); + +struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT, + .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, }; +EXPORT_SYMBOL_GPL(blkcg_root); + +static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; + +static bool blkcg_policy_enabled(struct request_queue *q, + const struct blkcg_policy *pol) +{ + return pol && test_bit(pol->plid, q->blkcg_pols); +} + +/** + * blkg_free - free a blkg + * @blkg: blkg to free + * + * Free @blkg which may be partially allocated. + */ +static void blkg_free(struct blkcg_gq *blkg) +{ + int i; + + if (!blkg) + return; + + for (i = 0; i < BLKCG_MAX_POLS; i++) + kfree(blkg->pd[i]); + + blk_exit_rl(&blkg->rl); + kfree(blkg); +} + +/** + * blkg_alloc - allocate a blkg + * @blkcg: block cgroup the new blkg is associated with + * @q: request_queue the new blkg is associated with + * @gfp_mask: allocation mask to use + * + * Allocate a new blkg assocating @blkcg and @q. + */ +static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, + gfp_t gfp_mask) +{ + struct blkcg_gq *blkg; + int i; + + /* alloc and init base part */ + blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); + if (!blkg) + return NULL; + + blkg->q = q; + INIT_LIST_HEAD(&blkg->q_node); + blkg->blkcg = blkcg; + atomic_set(&blkg->refcnt, 1); + + /* root blkg uses @q->root_rl, init rl only for !root blkgs */ + if (blkcg != &blkcg_root) { + if (blk_init_rl(&blkg->rl, q, gfp_mask)) + goto err_free; + blkg->rl.blkg = blkg; + } + + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + struct blkg_policy_data *pd; + + if (!blkcg_policy_enabled(q, pol)) + continue; + + /* alloc per-policy data and attach it to blkg */ + pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); + if (!pd) + goto err_free; + + blkg->pd[i] = pd; + pd->blkg = blkg; + pd->plid = i; + } + + return blkg; + +err_free: + blkg_free(blkg); + return NULL; +} + +/** + * __blkg_lookup - internal version of blkg_lookup() + * @blkcg: blkcg of interest + * @q: request_queue of interest + * @update_hint: whether to update lookup hint with the result or not + * + * This is internal version and shouldn't be used by policy + * implementations. Looks up blkgs for the @blkcg - @q pair regardless of + * @q's bypass state. If @update_hint is %true, the caller should be + * holding @q->queue_lock and lookup hint is updated on success. + */ +struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, + bool update_hint) +{ + struct blkcg_gq *blkg; + + blkg = rcu_dereference(blkcg->blkg_hint); + if (blkg && blkg->q == q) + return blkg; + + /* + * Hint didn't match. Look up from the radix tree. Note that the + * hint can only be updated under queue_lock as otherwise @blkg + * could have already been removed from blkg_tree. The caller is + * responsible for grabbing queue_lock if @update_hint. + */ + blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id); + if (blkg && blkg->q == q) { + if (update_hint) { + lockdep_assert_held(q->queue_lock); + rcu_assign_pointer(blkcg->blkg_hint, blkg); + } + return blkg; + } + + return NULL; +} + +/** + * blkg_lookup - lookup blkg for the specified blkcg - q pair + * @blkcg: blkcg of interest + * @q: request_queue of interest + * + * Lookup blkg for the @blkcg - @q pair. This function should be called + * under RCU read lock and is guaranteed to return %NULL if @q is bypassing + * - see blk_queue_bypass_start() for details. 
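+ *
+ * Illustrative call pattern (a sketch, not an additional caller in this
+ * file; what is done with the result is up to the caller):
+ *
+ *	rcu_read_lock();
+ *	blkg = blkg_lookup(blkcg, q);
+ *	if (blkg)
+ *		use_blkg(blkg);
+ *	rcu_read_unlock();
+ *
+ * The returned blkg is only guaranteed to stay around for as long as the
+ * RCU read lock is held.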
+ */ +struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (unlikely(blk_queue_bypass(q))) + return NULL; + return __blkg_lookup(blkcg, q, false); +} +EXPORT_SYMBOL_GPL(blkg_lookup); + +/* + * If @new_blkg is %NULL, this function tries to allocate a new one as + * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. + */ +static struct blkcg_gq *blkg_create(struct blkcg *blkcg, + struct request_queue *q, + struct blkcg_gq *new_blkg) +{ + struct blkcg_gq *blkg; + int i, ret; + + WARN_ON_ONCE(!rcu_read_lock_held()); + lockdep_assert_held(q->queue_lock); + + /* blkg holds a reference to blkcg */ + if (!css_tryget_online(&blkcg->css)) { + ret = -EINVAL; + goto err_free_blkg; + } + + /* allocate */ + if (!new_blkg) { + new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); + if (unlikely(!new_blkg)) { + ret = -ENOMEM; + goto err_put_css; + } + } + blkg = new_blkg; + + /* link parent */ + if (blkcg_parent(blkcg)) { + blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); + if (WARN_ON_ONCE(!blkg->parent)) { + ret = -EINVAL; + goto err_put_css; + } + blkg_get(blkg->parent); + } + + /* invoke per-policy init */ + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkg->pd[i] && pol->pd_init_fn) + pol->pd_init_fn(blkg); + } + + /* insert */ + spin_lock(&blkcg->lock); + ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); + if (likely(!ret)) { + hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); + list_add(&blkg->q_node, &q->blkg_list); + + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkg->pd[i] && pol->pd_online_fn) + pol->pd_online_fn(blkg); + } + } + blkg->online = true; + spin_unlock(&blkcg->lock); + + if (!ret) { + if (blkcg == &blkcg_root) { + q->root_blkg = blkg; + q->root_rl.blkg = blkg; + } + return blkg; + } + + /* @blkg failed fully initialized, use the usual release path */ + blkg_put(blkg); + return ERR_PTR(ret); + +err_put_css: + css_put(&blkcg->css); +err_free_blkg: + blkg_free(new_blkg); + return ERR_PTR(ret); +} + +/** + * blkg_lookup_create - lookup blkg, try to create one if not there + * @blkcg: blkcg of interest + * @q: request_queue of interest + * + * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to + * create one. blkg creation is performed recursively from blkcg_root such + * that all non-root blkg's have access to the parent blkg. This function + * should be called under RCU read lock and @q->queue_lock. + * + * Returns pointer to the looked up or created blkg on success, ERR_PTR() + * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not + * dead and bypassing, returns ERR_PTR(-EBUSY). + */ +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q) +{ + struct blkcg_gq *blkg; + + WARN_ON_ONCE(!rcu_read_lock_held()); + lockdep_assert_held(q->queue_lock); + + /* + * This could be the first entry point of blkcg implementation and + * we shouldn't allow anything to go through for a bypassing queue. + */ + if (unlikely(blk_queue_bypass(q))) + return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY); + + blkg = __blkg_lookup(blkcg, q, true); + if (blkg) + return blkg; + + /* + * Create blkgs walking down from blkcg_root to @blkcg, so that all + * non-root blkgs have access to their parents. 
+ */ + while (true) { + struct blkcg *pos = blkcg; + struct blkcg *parent = blkcg_parent(blkcg); + + while (parent && !__blkg_lookup(parent, q, false)) { + pos = parent; + parent = blkcg_parent(parent); + } + + blkg = blkg_create(pos, q, NULL); + if (pos == blkcg || IS_ERR(blkg)) + return blkg; + } +} +EXPORT_SYMBOL_GPL(blkg_lookup_create); + +static void blkg_destroy(struct blkcg_gq *blkg) +{ + struct blkcg *blkcg = blkg->blkcg; + int i; + + lockdep_assert_held(blkg->q->queue_lock); + lockdep_assert_held(&blkcg->lock); + + /* Something wrong if we are trying to remove same group twice */ + WARN_ON_ONCE(list_empty(&blkg->q_node)); + WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); + + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkg->pd[i] && pol->pd_offline_fn) + pol->pd_offline_fn(blkg); + } + blkg->online = false; + + radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); + list_del_init(&blkg->q_node); + hlist_del_init_rcu(&blkg->blkcg_node); + + /* + * Both setting lookup hint to and clearing it from @blkg are done + * under queue_lock. If it's not pointing to @blkg now, it never + * will. Hint assignment itself can race safely. + */ + if (rcu_access_pointer(blkcg->blkg_hint) == blkg) + rcu_assign_pointer(blkcg->blkg_hint, NULL); + + /* + * If root blkg is destroyed. Just clear the pointer since root_rl + * does not take reference on root blkg. + */ + if (blkcg == &blkcg_root) { + blkg->q->root_blkg = NULL; + blkg->q->root_rl.blkg = NULL; + } + + /* + * Put the reference taken at the time of creation so that when all + * queues are gone, group can be destroyed. + */ + blkg_put(blkg); +} + +/** + * blkg_destroy_all - destroy all blkgs associated with a request_queue + * @q: request_queue of interest + * + * Destroy all blkgs associated with @q. + */ +static void blkg_destroy_all(struct request_queue *q) +{ + struct blkcg_gq *blkg, *n; + + lockdep_assert_held(q->queue_lock); + + list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { + struct blkcg *blkcg = blkg->blkcg; + + spin_lock(&blkcg->lock); + blkg_destroy(blkg); + spin_unlock(&blkcg->lock); + } +} + +/* + * A group is RCU protected, but having an rcu lock does not mean that one + * can access all the fields of blkg and assume these are valid. For + * example, don't try to follow throtl_data and request queue links. + * + * Having a reference to blkg under an rcu allows accesses to only values + * local to groups like group stats and group rate limits. + */ +void __blkg_release_rcu(struct rcu_head *rcu_head) +{ + struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head); + int i; + + /* tell policies that this one is being freed */ + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkg->pd[i] && pol->pd_exit_fn) + pol->pd_exit_fn(blkg); + } + + /* release the blkcg and parent blkg refs this blkg has been holding */ + css_put(&blkg->blkcg->css); + if (blkg->parent) + blkg_put(blkg->parent); + + blkg_free(blkg); +} +EXPORT_SYMBOL_GPL(__blkg_release_rcu); + +/* + * The next function used by blk_queue_for_each_rl(). It's a bit tricky + * because the root blkg uses @q->root_rl instead of its own rl. + */ +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q) +{ + struct list_head *ent; + struct blkcg_gq *blkg; + + /* + * Determine the current blkg list_head. The first entry is + * root_rl which is off @q->blkg_list and mapped to the head. 
+ */ + if (rl == &q->root_rl) { + ent = &q->blkg_list; + /* There are no more block groups, hence no request lists */ + if (list_empty(ent)) + return NULL; + } else { + blkg = container_of(rl, struct blkcg_gq, rl); + ent = &blkg->q_node; + } + + /* walk to the next list_head, skip root blkcg */ + ent = ent->next; + if (ent == &q->root_blkg->q_node) + ent = ent->next; + if (ent == &q->blkg_list) + return NULL; + + blkg = container_of(ent, struct blkcg_gq, q_node); + return &blkg->rl; +} + +static int blkcg_reset_stats(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 val) +{ + struct blkcg *blkcg = css_to_blkcg(css); + struct blkcg_gq *blkg; + int i; + + /* + * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex + * which ends up putting cgroup's internal cgroup_tree_mutex under + * it; however, cgroup_tree_mutex is nested above cgroup file + * active protection and grabbing blkcg_pol_mutex from a cgroup + * file operation creates a possible circular dependency. cgroup + * internal locking is planned to go through further simplification + * and this issue should go away soon. For now, let's trylock + * blkcg_pol_mutex and restart the write on failure. + * + * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com + */ + if (!mutex_trylock(&blkcg_pol_mutex)) + return restart_syscall(); + spin_lock_irq(&blkcg->lock); + + /* + * Note that stat reset is racy - it doesn't synchronize against + * stat updates. This is a debug feature which shouldn't exist + * anyway. If you get hit by a race, retry. + */ + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkcg_policy_enabled(blkg->q, pol) && + pol->pd_reset_stats_fn) + pol->pd_reset_stats_fn(blkg); + } + } + + spin_unlock_irq(&blkcg->lock); + mutex_unlock(&blkcg_pol_mutex); + return 0; +} + +static const char *blkg_dev_name(struct blkcg_gq *blkg) +{ + /* some drivers (floppy) instantiate a queue w/o disk registered */ + if (blkg->q->backing_dev_info.dev) + return dev_name(blkg->q->backing_dev_info.dev); + return NULL; +} + +/** + * blkcg_print_blkgs - helper for printing per-blkg data + * @sf: seq_file to print to + * @blkcg: blkcg of interest + * @prfill: fill function to print out a blkg + * @pol: policy in question + * @data: data to be passed to @prfill + * @show_total: to print out sum of prfill return values or not + * + * This function invokes @prfill on each blkg of @blkcg if pd for the + * policy specified by @pol exists. @prfill is invoked with @sf, the + * policy data and @data and the matching queue lock held. If @show_total + * is %true, the sum of the return values from @prfill is printed with + * "Total" label at the end. + * + * This is to be used to construct print functions for + * cftype->read_seq_string method. 
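+ *
+ * Sketch of a typical print function built on this helper (the policy,
+ * private data type and stat field are hypothetical; cfq's cfqg_print_*
+ * helpers follow the same shape):
+ *
+ *	static int foo_print_stat(struct seq_file *sf, void *v)
+ *	{
+ *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ *				  blkg_prfill_stat, &blkcg_policy_foo,
+ *				  offsetof(struct foo_group, stat), false);
+ *		return 0;
+ *	}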
+ */ +void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, + u64 (*prfill)(struct seq_file *, + struct blkg_policy_data *, int), + const struct blkcg_policy *pol, int data, + bool show_total) +{ + struct blkcg_gq *blkg; + u64 total = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { + spin_lock_irq(blkg->q->queue_lock); + if (blkcg_policy_enabled(blkg->q, pol)) + total += prfill(sf, blkg->pd[pol->plid], data); + spin_unlock_irq(blkg->q->queue_lock); + } + rcu_read_unlock(); + + if (show_total) + seq_printf(sf, "Total %llu\n", (unsigned long long)total); +} +EXPORT_SYMBOL_GPL(blkcg_print_blkgs); + +/** + * __blkg_prfill_u64 - prfill helper for a single u64 value + * @sf: seq_file to print to + * @pd: policy private data of interest + * @v: value to print + * + * Print @v to @sf for the device associated with @pd. + */ +u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v) +{ + const char *dname = blkg_dev_name(pd->blkg); + + if (!dname) + return 0; + + seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v); + return v; +} +EXPORT_SYMBOL_GPL(__blkg_prfill_u64); + +/** + * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat + * @sf: seq_file to print to + * @pd: policy private data of interest + * @rwstat: rwstat to print + * + * Print @rwstat to @sf for the device associated with @pd. + */ +u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + const struct blkg_rwstat *rwstat) +{ + static const char *rwstr[] = { + [BLKG_RWSTAT_READ] = "Read", + [BLKG_RWSTAT_WRITE] = "Write", + [BLKG_RWSTAT_SYNC] = "Sync", + [BLKG_RWSTAT_ASYNC] = "Async", + }; + const char *dname = blkg_dev_name(pd->blkg); + u64 v; + int i; + + if (!dname) + return 0; + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + seq_printf(sf, "%s %s %llu\n", dname, rwstr[i], + (unsigned long long)rwstat->cnt[i]); + + v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE]; + seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v); + return v; +} +EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat); + +/** + * blkg_prfill_stat - prfill callback for blkg_stat + * @sf: seq_file to print to + * @pd: policy private data of interest + * @off: offset to the blkg_stat in @pd + * + * prfill callback for printing a blkg_stat. + */ +u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off) +{ + return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off)); +} +EXPORT_SYMBOL_GPL(blkg_prfill_stat); + +/** + * blkg_prfill_rwstat - prfill callback for blkg_rwstat + * @sf: seq_file to print to + * @pd: policy private data of interest + * @off: offset to the blkg_rwstat in @pd + * + * prfill callback for printing a blkg_rwstat. + */ +u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off); + + return __blkg_prfill_rwstat(sf, pd, &rwstat); +} +EXPORT_SYMBOL_GPL(blkg_prfill_rwstat); + +/** + * blkg_stat_recursive_sum - collect hierarchical blkg_stat + * @pd: policy private data of interest + * @off: offset to the blkg_stat in @pd + * + * Collect the blkg_stat specified by @off from @pd and all its online + * descendants and return the sum. The caller must be holding the queue + * lock for online tests.
+ */ +u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off) +{ + struct blkcg_policy *pol = blkcg_policy[pd->plid]; + struct blkcg_gq *pos_blkg; + struct cgroup_subsys_state *pos_css; + u64 sum = 0; + + lockdep_assert_held(pd->blkg->q->queue_lock); + + rcu_read_lock(); + blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) { + struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol); + struct blkg_stat *stat = (void *)pos_pd + off; + + if (pos_blkg->online) + sum += blkg_stat_read(stat); + } + rcu_read_unlock(); + + return sum; +} +EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum); + +/** + * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat + * @pd: policy private data of interest + * @off: offset to the blkg_stat in @pd + * + * Collect the blkg_rwstat specified by @off from @pd and all its online + * descendants and return the sum. The caller must be holding the queue + * lock for online tests. + */ +struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, + int off) +{ + struct blkcg_policy *pol = blkcg_policy[pd->plid]; + struct blkcg_gq *pos_blkg; + struct cgroup_subsys_state *pos_css; + struct blkg_rwstat sum = { }; + int i; + + lockdep_assert_held(pd->blkg->q->queue_lock); + + rcu_read_lock(); + blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) { + struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol); + struct blkg_rwstat *rwstat = (void *)pos_pd + off; + struct blkg_rwstat tmp; + + if (!pos_blkg->online) + continue; + + tmp = blkg_rwstat_read(rwstat); + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + sum.cnt[i] += tmp.cnt[i]; + } + rcu_read_unlock(); + + return sum; +} +EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum); + +/** + * blkg_conf_prep - parse and prepare for per-blkg config update + * @blkcg: target block cgroup + * @pol: target policy + * @input: input string + * @ctx: blkg_conf_ctx to be filled + * + * Parse per-blkg config update from @input and initialize @ctx with the + * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new + * value. This function returns with RCU read lock and queue lock held and + * must be paired with blkg_conf_finish(). + */ +int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + const char *input, struct blkg_conf_ctx *ctx) + __acquires(rcu) __acquires(disk->queue->queue_lock) +{ + struct gendisk *disk; + struct blkcg_gq *blkg; + unsigned int major, minor; + unsigned long long v; + int part, ret; + + if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3) + return -EINVAL; + + disk = get_gendisk(MKDEV(major, minor), &part); + if (!disk || part) + return -EINVAL; + + rcu_read_lock(); + spin_lock_irq(disk->queue->queue_lock); + + if (blkcg_policy_enabled(disk->queue, pol)) + blkg = blkg_lookup_create(blkcg, disk->queue); + else + blkg = ERR_PTR(-EINVAL); + + if (IS_ERR(blkg)) { + ret = PTR_ERR(blkg); + rcu_read_unlock(); + spin_unlock_irq(disk->queue->queue_lock); + put_disk(disk); + /* + * If queue was bypassing, we should retry. Do so after a + * short msleep(). It isn't strictly necessary but queue + * can be bypassing for some time and it's always nice to + * avoid busy looping. + */ + if (ret == -EBUSY) { + msleep(10); + ret = restart_syscall(); + } + return ret; + } + + ctx->disk = disk; + ctx->blkg = blkg; + ctx->v = v; + return 0; +} +EXPORT_SYMBOL_GPL(blkg_conf_prep); + +/** + * blkg_conf_finish - finish up per-blkg config update + * @ctx: blkg_conf_ctx intiailized by blkg_conf_prep() + * + * Finish up after per-blkg config update. 
This function must be paired + * with blkg_conf_prep(). + */ +void blkg_conf_finish(struct blkg_conf_ctx *ctx) + __releases(ctx->disk->queue->queue_lock) __releases(rcu) +{ + spin_unlock_irq(ctx->disk->queue->queue_lock); + rcu_read_unlock(); + put_disk(ctx->disk); +} +EXPORT_SYMBOL_GPL(blkg_conf_finish); + +struct cftype blkcg_files[] = { + { + .name = "reset_stats", + .write_u64 = blkcg_reset_stats, + }, + { } /* terminate */ +}; + +/** + * blkcg_css_offline - cgroup css_offline callback + * @css: css of interest + * + * This function is called when @css is about to go away and responsible + * for shooting down all blkgs associated with @css. blkgs should be + * removed while holding both q and blkcg locks. As blkcg lock is nested + * inside q lock, this function performs reverse double lock dancing. + * + * This is the blkcg counterpart of ioc_release_fn(). + */ +static void blkcg_css_offline(struct cgroup_subsys_state *css) +{ + struct blkcg *blkcg = css_to_blkcg(css); + + spin_lock_irq(&blkcg->lock); + + while (!hlist_empty(&blkcg->blkg_list)) { + struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, + struct blkcg_gq, blkcg_node); + struct request_queue *q = blkg->q; + + if (spin_trylock(q->queue_lock)) { + blkg_destroy(blkg); + spin_unlock(q->queue_lock); + } else { + spin_unlock_irq(&blkcg->lock); + cpu_relax(); + spin_lock_irq(&blkcg->lock); + } + } + + spin_unlock_irq(&blkcg->lock); +} + +static void blkcg_css_free(struct cgroup_subsys_state *css) +{ + struct blkcg *blkcg = css_to_blkcg(css); + + if (blkcg != &blkcg_root) + kfree(blkcg); +} + +static struct cgroup_subsys_state * +blkcg_css_alloc(struct cgroup_subsys_state *parent_css) +{ + struct blkcg *blkcg; + + if (!parent_css) { + blkcg = &blkcg_root; + goto done; + } + + blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); + if (!blkcg) + return ERR_PTR(-ENOMEM); + + blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT; + blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT; +done: + spin_lock_init(&blkcg->lock); + INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC); + INIT_HLIST_HEAD(&blkcg->blkg_list); + + return &blkcg->css; +} + +/** + * blkcg_init_queue - initialize blkcg part of request queue + * @q: request_queue to initialize + * + * Called from blk_alloc_queue_node(). Responsible for initializing blkcg + * part of new request_queue @q. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int blkcg_init_queue(struct request_queue *q) +{ + might_sleep(); + + return blk_throtl_init(q); +} + +/** + * blkcg_drain_queue - drain blkcg part of request_queue + * @q: request_queue to drain + * + * Called from blk_drain_queue(). Responsible for draining blkcg part. + */ +void blkcg_drain_queue(struct request_queue *q) +{ + lockdep_assert_held(q->queue_lock); + + /* + * @q could be exiting and already have destroyed all blkgs as + * indicated by NULL root_blkg. If so, don't confuse policies. + */ + if (!q->root_blkg) + return; + + blk_throtl_drain(q); +} + +/** + * blkcg_exit_queue - exit and release blkcg part of request_queue + * @q: request_queue being released + * + * Called from blk_release_queue(). Responsible for exiting blkcg part. + */ +void blkcg_exit_queue(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + blkg_destroy_all(q); + spin_unlock_irq(q->queue_lock); + + blk_throtl_exit(q); +} + +/* + * We cannot support shared io contexts, as we have no mean to support + * two tasks with the same ioc in two different groups without major rework + * of the main cic data structures. 
For now we allow a task to change + * its cgroup only if it's the only owner of its ioc. + */ +static int blkcg_can_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct io_context *ioc; + int ret = 0; + + /* task_lock() is needed to avoid races with exit_io_context() */ + cgroup_taskset_for_each(task, tset) { + task_lock(task); + ioc = task->io_context; + if (ioc && atomic_read(&ioc->nr_tasks) > 1) + ret = -EINVAL; + task_unlock(task); + if (ret) + break; + } + return ret; +} + +struct cgroup_subsys blkio_cgrp_subsys = { + .css_alloc = blkcg_css_alloc, + .css_offline = blkcg_css_offline, + .css_free = blkcg_css_free, + .can_attach = blkcg_can_attach, + .legacy_cftypes = blkcg_files, +#ifdef CONFIG_MEMCG + /* + * This ensures that, if available, memcg is automatically enabled + * together on the default hierarchy so that the owner cgroup can + * be retrieved from writeback pages. + */ + .depends_on = 1 << memory_cgrp_id, +#endif +}; +EXPORT_SYMBOL_GPL(blkio_cgrp_subsys); + +/** + * blkcg_activate_policy - activate a blkcg policy on a request_queue + * @q: request_queue of interest + * @pol: blkcg policy to activate + * + * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through + * bypass mode to populate its blkgs with policy_data for @pol. + * + * Activation happens with @q bypassed, so nobody would be accessing blkgs + * from IO path. Update of each blkg is protected by both queue and blkcg + * locks so that holding either lock and testing blkcg_policy_enabled() is + * always enough for dereferencing policy data. + * + * The caller is responsible for synchronizing [de]activations and policy + * [un]registerations. Returns 0 on success, -errno on failure. + */ +int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol) +{ + LIST_HEAD(pds); + struct blkcg_gq *blkg, *new_blkg; + struct blkg_policy_data *pd, *n; + int cnt = 0, ret; + bool preloaded; + + if (blkcg_policy_enabled(q, pol)) + return 0; + + /* preallocations for root blkg */ + new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); + if (!new_blkg) + return -ENOMEM; + + blk_queue_bypass_start(q); + + preloaded = !radix_tree_preload(GFP_KERNEL); + + /* + * Make sure the root blkg exists and count the existing blkgs. As + * @q is bypassing at this point, blkg_lookup_create() can't be + * used. Open code it. + */ + spin_lock_irq(q->queue_lock); + + rcu_read_lock(); + blkg = __blkg_lookup(&blkcg_root, q, false); + if (blkg) + blkg_free(new_blkg); + else + blkg = blkg_create(&blkcg_root, q, new_blkg); + rcu_read_unlock(); + + if (preloaded) + radix_tree_preload_end(); + + if (IS_ERR(blkg)) { + ret = PTR_ERR(blkg); + goto out_unlock; + } + + list_for_each_entry(blkg, &q->blkg_list, q_node) + cnt++; + + spin_unlock_irq(q->queue_lock); + + /* allocate policy_data for all existing blkgs */ + while (cnt--) { + pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); + if (!pd) { + ret = -ENOMEM; + goto out_free; + } + list_add_tail(&pd->alloc_node, &pds); + } + + /* + * Install the allocated pds. With @q bypassing, no new blkg + * should have been created while the queue lock was dropped. + */ + spin_lock_irq(q->queue_lock); + + list_for_each_entry(blkg, &q->blkg_list, q_node) { + if (WARN_ON(list_empty(&pds))) { + /* umm... 
this shouldn't happen, just abort */ + ret = -ENOMEM; + goto out_unlock; + } + pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); + list_del_init(&pd->alloc_node); + + /* grab blkcg lock too while installing @pd on @blkg */ + spin_lock(&blkg->blkcg->lock); + + blkg->pd[pol->plid] = pd; + pd->blkg = blkg; + pd->plid = pol->plid; + pol->pd_init_fn(blkg); + + spin_unlock(&blkg->blkcg->lock); + } + + __set_bit(pol->plid, q->blkcg_pols); + ret = 0; +out_unlock: + spin_unlock_irq(q->queue_lock); +out_free: + blk_queue_bypass_end(q); + list_for_each_entry_safe(pd, n, &pds, alloc_node) + kfree(pd); + return ret; +} +EXPORT_SYMBOL_GPL(blkcg_activate_policy); + +/** + * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue + * @q: request_queue of interest + * @pol: blkcg policy to deactivate + * + * Deactivate @pol on @q. Follows the same synchronization rules as + * blkcg_activate_policy(). + */ +void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol) +{ + struct blkcg_gq *blkg; + + if (!blkcg_policy_enabled(q, pol)) + return; + + blk_queue_bypass_start(q); + spin_lock_irq(q->queue_lock); + + __clear_bit(pol->plid, q->blkcg_pols); + + /* if no policy is left, no need for blkgs - shoot them down */ + if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS)) + blkg_destroy_all(q); + + list_for_each_entry(blkg, &q->blkg_list, q_node) { + /* grab blkcg lock too while removing @pd from @blkg */ + spin_lock(&blkg->blkcg->lock); + + if (pol->pd_offline_fn) + pol->pd_offline_fn(blkg); + if (pol->pd_exit_fn) + pol->pd_exit_fn(blkg); + + kfree(blkg->pd[pol->plid]); + blkg->pd[pol->plid] = NULL; + + spin_unlock(&blkg->blkcg->lock); + } + + spin_unlock_irq(q->queue_lock); + blk_queue_bypass_end(q); +} +EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); + +/** + * blkcg_policy_register - register a blkcg policy + * @pol: blkcg policy to register + * + * Register @pol with blkcg core. Might sleep and @pol may be modified on + * successful registration. Returns 0 on success and -errno on failure. + */ +int blkcg_policy_register(struct blkcg_policy *pol) +{ + int i, ret; + + if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) + return -EINVAL; + + mutex_lock(&blkcg_pol_mutex); + + /* find an empty slot */ + ret = -ENOSPC; + for (i = 0; i < BLKCG_MAX_POLS; i++) + if (!blkcg_policy[i]) + break; + if (i >= BLKCG_MAX_POLS) + goto out_unlock; + + /* register and update blkgs */ + pol->plid = i; + blkcg_policy[i] = pol; + + /* everything is in place, add intf files for the new policy */ + if (pol->cftypes) + WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, + pol->cftypes)); + ret = 0; +out_unlock: + mutex_unlock(&blkcg_pol_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(blkcg_policy_register); + +/** + * blkcg_policy_unregister - unregister a blkcg policy + * @pol: blkcg policy to unregister + * + * Undo blkcg_policy_register(@pol). Might sleep. 
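+ *
+ * Typical pairing, sketched with a hypothetical policy (struct and
+ * callback names are placeholders):
+ *
+ *	static struct blkcg_policy blkcg_policy_foo = {
+ *		.pd_size	= sizeof(struct foo_group),
+ *		.cftypes	= foo_files,
+ *		.pd_init_fn	= foo_pd_init,
+ *		.pd_exit_fn	= foo_pd_exit,
+ *	};
+ *
+ *	ret = blkcg_policy_register(&blkcg_policy_foo);	(module init)
+ *	...
+ *	blkcg_policy_unregister(&blkcg_policy_foo);	(module exit)
+ *
+ * where struct foo_group embeds struct blkg_policy_data as its first
+ * member, so that pd_size is at least sizeof(struct blkg_policy_data).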
+ */ +void blkcg_policy_unregister(struct blkcg_policy *pol) +{ + mutex_lock(&blkcg_pol_mutex); + + if (WARN_ON(blkcg_policy[pol->plid] != pol)) + goto out_unlock; + + /* kill the intf files first */ + if (pol->cftypes) + cgroup_rm_cftypes(pol->cftypes); + + /* unregister and update blkgs */ + blkcg_policy[pol->plid] = NULL; +out_unlock: + mutex_unlock(&blkcg_pol_mutex); +} +EXPORT_SYMBOL_GPL(blkcg_policy_unregister); diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h new file mode 100644 index 000000000..c567865b5 --- /dev/null +++ b/block/blk-cgroup.h @@ -0,0 +1,603 @@ +#ifndef _BLK_CGROUP_H +#define _BLK_CGROUP_H +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2009 Vivek Goyal + * Nauman Rafique + */ + +#include +#include +#include +#include +#include +#include + +/* Max limits for throttle policy */ +#define THROTL_IOPS_MAX UINT_MAX + +/* CFQ specific, out here for blkcg->cfq_weight */ +#define CFQ_WEIGHT_MIN 10 +#define CFQ_WEIGHT_MAX 1000 +#define CFQ_WEIGHT_DEFAULT 500 + +#ifdef CONFIG_BLK_CGROUP + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ, + BLKG_RWSTAT_WRITE, + BLKG_RWSTAT_SYNC, + BLKG_RWSTAT_ASYNC, + + BLKG_RWSTAT_NR, + BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, +}; + +struct blkcg_gq; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + + struct radix_tree_root blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + + /* TODO: per-policy storage in blkcg */ + unsigned int cfq_weight; /* belongs to cfq */ + unsigned int cfq_leaf_weight; +}; + +struct blkg_stat { + struct u64_stats_sync syncp; + uint64_t cnt; +}; + +struct blkg_rwstat { + struct u64_stats_sync syncp; + uint64_t cnt[BLKG_RWSTAT_NR]; +}; + +/* + * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a + * request_queue (q). This is used by blkcg policies which need to track + * information per blkcg - q pair. + * + * There can be multiple active blkcg policies and each has its private + * data on each blkg, the size of which is determined by + * blkcg_policy->pd_size. blkcg core allocates and frees such areas + * together with blkg and invokes pd_init/exit_fn() methods. + * + * Such private data must embed struct blkg_policy_data (pd) at the + * beginning and pd_size can't be smaller than pd. + */ +struct blkg_policy_data { + /* the blkg and policy id this per-policy data belongs to */ + struct blkcg_gq *blkg; + int plid; + + /* used during policy activation */ + struct list_head alloc_node; +}; + +/* association between a blk cgroup and a request queue */ +struct blkcg_gq { + /* Pointer to the associated request_queue */ + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + + /* all non-root blkcg_gq's are guaranteed to have access to parent */ + struct blkcg_gq *parent; + + /* request allocation list for this blkcg-q pair */ + struct request_list rl; + + /* reference count */ + atomic_t refcnt; + + /* is this blkg online? 
protected by both blkcg and q locks */ + bool online; + + struct blkg_policy_data *pd[BLKCG_MAX_POLS]; + + struct rcu_head rcu_head; +}; + +typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg); +typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg); +typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg); +typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg); +typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg); + +struct blkcg_policy { + int plid; + /* policy specific private data size */ + size_t pd_size; + /* cgroup files for the policy */ + struct cftype *cftypes; + + /* operations */ + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_exit_pd_fn *pd_exit_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; +}; + +extern struct blkcg blkcg_root; + +struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); +int blkcg_init_queue(struct request_queue *q); +void blkcg_drain_queue(struct request_queue *q); +void blkcg_exit_queue(struct request_queue *q); + +/* Blkio controller policy registration */ +int blkcg_policy_register(struct blkcg_policy *pol); +void blkcg_policy_unregister(struct blkcg_policy *pol); +int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol); +void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol); + +void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, + u64 (*prfill)(struct seq_file *, + struct blkg_policy_data *, int), + const struct blkcg_policy *pol, int data, + bool show_total); +u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); +u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + const struct blkg_rwstat *rwstat); +u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); +u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off); + +u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off); +struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, + int off); + +struct blkg_conf_ctx { + struct gendisk *disk; + struct blkcg_gq *blkg; + u64 v; +}; + +int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + const char *input, struct blkg_conf_ctx *ctx); +void blkg_conf_finish(struct blkg_conf_ctx *ctx); + + +static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct blkcg, css) : NULL; +} + +static inline struct blkcg *task_blkcg(struct task_struct *tsk) +{ + return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); +} + +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_css) + return css_to_blkcg(bio->bi_css); + return task_blkcg(current); +} + +/** + * blkcg_parent - get the parent of a blkcg + * @blkcg: blkcg of interest + * + * Return the parent blkcg of @blkcg. Can be called anytime. + */ +static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) +{ + return css_to_blkcg(blkcg->css.parent); +} + +/** + * blkg_to_pdata - get policy private data + * @blkg: blkg of interest + * @pol: policy of interest + * + * Return pointer to private data associated with the @blkg-@pol pair. + */ +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) +{ + return blkg ? 
blkg->pd[pol->plid] : NULL; +} + +/** + * pdata_to_blkg - get blkg associated with policy private data + * @pd: policy private data of interest + * + * @pd is policy private data. Determine the blkg it's associated with. + */ +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) +{ + return pd ? pd->blkg : NULL; +} + +/** + * blkg_path - format cgroup path of blkg + * @blkg: blkg of interest + * @buf: target buffer + * @buflen: target buffer length + * + * Format the path of the cgroup of @blkg into @buf. + */ +static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) +{ + char *p; + + p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); + if (!p) { + strncpy(buf, "", buflen); + return -ENAMETOOLONG; + } + + memmove(buf, p, buf + buflen - p); + return 0; +} + +/** + * blkg_get - get a blkg reference + * @blkg: blkg to get + * + * The caller should be holding an existing reference. + */ +static inline void blkg_get(struct blkcg_gq *blkg) +{ + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); +} + +void __blkg_release_rcu(struct rcu_head *rcu); + +/** + * blkg_put - put a blkg reference + * @blkg: blkg to put + */ +static inline void blkg_put(struct blkcg_gq *blkg) +{ + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) + call_rcu(&blkg->rcu_head, __blkg_release_rcu); +} + +struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, + bool update_hint); + +/** + * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU + * read locked. If called under either blkcg or queue lock, the iteration + * is guaranteed to include all and only online blkgs. The caller may + * update @pos_css by calling css_rightmost_descendant() to skip subtree. + * @p_blkg is included in the iteration and the first node to be visited. + */ +#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +/** + * blkg_for_each_descendant_post - post-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Similar to blkg_for_each_descendant_pre() but performs post-order + * traversal instead. Synchronization rules are the same. @p_blkg is + * included in the iteration and the last node to be visited. + */ +#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +/** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. 
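+ *
+ * A simplified sketch of the expected pairing (illustrative; the real
+ * allocation path adds retry and congestion handling on top of this):
+ *
+ *	rl = blk_get_rl(q, bio);
+ *	rq = mempool_alloc(rl->rq_pool, gfp_mask);
+ *	if (!rq) {
+ *		blk_put_rl(rl);
+ *		return NULL;
+ *	}
+ *	blk_rq_set_rl(rq, rl);
+ *
+ * The reference taken here is dropped with blk_put_rl() once the request
+ * is freed.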
+ */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; + + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ + blkg = blkg_lookup_create(blkcg, q); + if (unlikely(IS_ERR(blkg))) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. + */ +static inline void blk_put_rl(struct request_list *rl) +{ + /* root_rl may not have blkg set */ + if (rl->blkg && rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. + */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. + */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + +static inline void blkg_stat_init(struct blkg_stat *stat) +{ + u64_stats_init(&stat->syncp); +} + +/** + * blkg_stat_add - add a value to a blkg_stat + * @stat: target blkg_stat + * @val: value to add + * + * Add @val to @stat. The caller is responsible for synchronizing calls to + * this function. + */ +static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) +{ + u64_stats_update_begin(&stat->syncp); + stat->cnt += val; + u64_stats_update_end(&stat->syncp); +} + +/** + * blkg_stat_read - read the current value of a blkg_stat + * @stat: blkg_stat to read + * + * Read the current value of @stat. This function can be called without + * synchroniztion and takes care of u64 atomicity. + */ +static inline uint64_t blkg_stat_read(struct blkg_stat *stat) +{ + unsigned int start; + uint64_t v; + + do { + start = u64_stats_fetch_begin_irq(&stat->syncp); + v = stat->cnt; + } while (u64_stats_fetch_retry_irq(&stat->syncp, start)); + + return v; +} + +/** + * blkg_stat_reset - reset a blkg_stat + * @stat: blkg_stat to reset + */ +static inline void blkg_stat_reset(struct blkg_stat *stat) +{ + stat->cnt = 0; +} + +/** + * blkg_stat_merge - merge a blkg_stat into another + * @to: the destination blkg_stat + * @from: the source + * + * Add @from's count to @to. 
+ */ +static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from) +{ + blkg_stat_add(to, blkg_stat_read(from)); +} + +static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) +{ + u64_stats_init(&rwstat->syncp); +} + +/** + * blkg_rwstat_add - add a value to a blkg_rwstat + * @rwstat: target blkg_rwstat + * @rw: mask of REQ_{WRITE|SYNC} + * @val: value to add + * + * Add @val to @rwstat. The counters are chosen according to @rw. The + * caller is responsible for synchronizing calls to this function. + */ +static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, + int rw, uint64_t val) +{ + u64_stats_update_begin(&rwstat->syncp); + + if (rw & REQ_WRITE) + rwstat->cnt[BLKG_RWSTAT_WRITE] += val; + else + rwstat->cnt[BLKG_RWSTAT_READ] += val; + if (rw & REQ_SYNC) + rwstat->cnt[BLKG_RWSTAT_SYNC] += val; + else + rwstat->cnt[BLKG_RWSTAT_ASYNC] += val; + + u64_stats_update_end(&rwstat->syncp); +} + +/** + * blkg_rwstat_read - read the current values of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Read the current snapshot of @rwstat and return it as the return value. + * This function can be called without synchronization and takes care of + * u64 atomicity. + */ +static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) +{ + unsigned int start; + struct blkg_rwstat tmp; + + do { + start = u64_stats_fetch_begin_irq(&rwstat->syncp); + tmp = *rwstat; + } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start)); + + return tmp; +} + +/** + * blkg_rwstat_total - read the total count of a blkg_rwstat + * @rwstat: blkg_rwstat to read + * + * Return the total count of @rwstat regardless of the IO direction. This + * function can be called without synchronization and takes care of u64 + * atomicity. + */ +static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) +{ + struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); + + return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; +} + +/** + * blkg_rwstat_reset - reset a blkg_rwstat + * @rwstat: blkg_rwstat to reset + */ +static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) +{ + memset(rwstat->cnt, 0, sizeof(rwstat->cnt)); +} + +/** + * blkg_rwstat_merge - merge a blkg_rwstat into another + * @to: the destination blkg_rwstat + * @from: the source + * + * Add @from's counts to @to. 
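+ *
+ * Illustrative use, modelled on a policy transferring a dying group's
+ * counters to its parent (the stat field name is a placeholder):
+ *
+ *	blkg_rwstat_merge(&parent_stats->serviced, &stats->serviced);
+ *	blkg_rwstat_reset(&stats->serviced);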
+ */ +static inline void blkg_rwstat_merge(struct blkg_rwstat *to, + struct blkg_rwstat *from) +{ + struct blkg_rwstat v = blkg_rwstat_read(from); + int i; + + u64_stats_update_begin(&to->syncp); + for (i = 0; i < BLKG_RWSTAT_NR; i++) + to->cnt[i] += v.cnt[i]; + u64_stats_update_end(&to->syncp); +} + +#else /* CONFIG_BLK_CGROUP */ + +struct cgroup; +struct blkcg; + +struct blkg_policy_data { +}; + +struct blkcg_gq { +}; + +struct blkcg_policy { +}; + +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } +static inline int blkcg_init_queue(struct request_queue *q) { return 0; } +static inline void blkcg_drain_queue(struct request_queue *q) { } +static inline void blkcg_exit_queue(struct request_queue *q) { } +static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } +static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } +static inline int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { return 0; } +static inline void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { } + +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) { return NULL; } +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } +static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } +static inline void blkg_get(struct blkcg_gq *blkg) { } +static inline void blkg_put(struct blkcg_gq *blkg) { } + +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + +#endif /* CONFIG_BLK_CGROUP */ +#endif /* _BLK_CGROUP_H */ diff --git a/block/blk-core.c b/block/blk-core.c new file mode 100644 index 000000000..4c6e9ced9 --- /dev/null +++ b/block/blk-core.c @@ -0,0 +1,3371 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994, Karl Keyte: Added support for disk statistics + * Elevator latency, (C) 2000 Andrea Arcangeli SuSE + * Queue request tables / lock, selectable elevator, Jens Axboe + * kernel-doc documentation started by NeilBrown + * - July2000 + * bio rewrite, highmem i/o, etc, Jens Axboe - may 2001 + */ + +/* + * This handles all read/write requests to block devices + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#include "blk.h" +#include "blk-cgroup.h" +#include "blk-mq.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_split); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); + +DEFINE_IDA(blk_queue_ida); + +int trap_non_toi_io; + +/* + * For the allocated request tables + */ +struct kmem_cache *request_cachep = NULL; + +/* + * For queue allocation + */ +struct kmem_cache *blk_requestq_cachep; + +/* + * Controlling structure to kblockd + */ +static struct workqueue_struct 
*kblockd_workqueue; + +void blk_queue_congestion_threshold(struct request_queue *q) +{ + int nr; + + nr = q->nr_requests - (q->nr_requests / 8) + 1; + if (nr > q->nr_requests) + nr = q->nr_requests; + q->nr_congestion_on = nr; + + nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; + if (nr < 1) + nr = 1; + q->nr_congestion_off = nr; +} + +/** + * blk_get_backing_dev_info - get the address of a queue's backing_dev_info + * @bdev: device + * + * Locates the passed device's request queue and returns the address of its + * backing_dev_info. This function can only be called if @bdev is opened + * and the return value is never NULL. + */ +struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + return &q->backing_dev_info; +} +EXPORT_SYMBOL(blk_get_backing_dev_info); + +void blk_rq_init(struct request_queue *q, struct request *rq) +{ + memset(rq, 0, sizeof(*rq)); + + INIT_LIST_HEAD(&rq->queuelist); + INIT_LIST_HEAD(&rq->timeout_list); + rq->cpu = -1; + rq->q = q; + rq->__sector = (sector_t) -1; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + rq->cmd = rq->__cmd; + rq->cmd_len = BLK_MAX_CDB; + rq->tag = -1; + rq->start_time = jiffies; + set_start_time_ns(rq); + rq->part = NULL; +} +EXPORT_SYMBOL(blk_rq_init); + +static void req_bio_endio(struct request *rq, struct bio *bio, + unsigned int nbytes, int error) +{ + if (error) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + error = -EIO; + + if (unlikely(rq->cmd_flags & REQ_QUIET)) + set_bit(BIO_QUIET, &bio->bi_flags); + + bio_advance(bio, nbytes); + + /* don't actually finish bio if it's part of flush sequence */ + if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) + bio_endio(bio, error); +} + +void blk_dump_rq_flags(struct request *rq, char *msg) +{ + int bit; + + printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg, + rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, + (unsigned long long) rq->cmd_flags); + + printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", + (unsigned long long)blk_rq_pos(rq), + blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); + printk(KERN_INFO " bio %p, biotail %p, len %u\n", + rq->bio, rq->biotail, blk_rq_bytes(rq)); + + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { + printk(KERN_INFO " cdb: "); + for (bit = 0; bit < BLK_MAX_CDB; bit++) + printk("%02x ", rq->cmd[bit]); + printk("\n"); + } +} +EXPORT_SYMBOL(blk_dump_rq_flags); + +static void blk_delay_work(struct work_struct *work) +{ + struct request_queue *q; + + q = container_of(work, struct request_queue, delay_work.work); + spin_lock_irq(q->queue_lock); + __blk_run_queue(q); + spin_unlock_irq(q->queue_lock); +} + +/** + * blk_delay_queue - restart queueing after defined interval + * @q: The &struct request_queue in question + * @msecs: Delay in msecs + * + * Description: + * Sometimes queueing needs to be postponed for a little while, to allow + * resources to come back. This function will make sure that queueing is + * restarted around the specified time. Queue lock must be held. 
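+ *
+ * A request_fn that temporarily runs out of device resources might use it
+ * roughly as follows, so that queueing is retried a few milliseconds later
+ * (illustrative sketch; the resource check is hypothetical):
+ *
+ *	if (device_busy(dev)) {
+ *		blk_delay_queue(q, 3);
+ *		return;
+ *	}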
+ */ +void blk_delay_queue(struct request_queue *q, unsigned long msecs) +{ + if (likely(!blk_queue_dead(q))) + queue_delayed_work(kblockd_workqueue, &q->delay_work, + msecs_to_jiffies(msecs)); +} +EXPORT_SYMBOL(blk_delay_queue); + +/** + * blk_start_queue - restart a previously stopped queue + * @q: The &struct request_queue in question + * + * Description: + * blk_start_queue() will clear the stop flag on the queue, and call + * the request_fn for the queue if it was in a stopped state when + * entered. Also see blk_stop_queue(). Queue lock must be held. + **/ +void blk_start_queue(struct request_queue *q) +{ + WARN_ON(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); +} +EXPORT_SYMBOL(blk_start_queue); + +/** + * blk_stop_queue - stop a queue + * @q: The &struct request_queue in question + * + * Description: + * The Linux block layer assumes that a block driver will consume all + * entries on the request queue when the request_fn strategy is called. + * Often this will not happen, because of hardware limitations (queue + * depth settings). If a device driver gets a 'queue full' response, + * or if it simply chooses not to queue more I/O at one point, it can + * call this function to prevent the request_fn from being called until + * the driver has signalled it's ready to go again. This happens by calling + * blk_start_queue() to restart queue operations. Queue lock must be held. + **/ +void blk_stop_queue(struct request_queue *q) +{ + cancel_delayed_work(&q->delay_work); + queue_flag_set(QUEUE_FLAG_STOPPED, q); +} +EXPORT_SYMBOL(blk_stop_queue); + +/** + * blk_sync_queue - cancel any pending callbacks on a queue + * @q: the queue + * + * Description: + * The block layer may perform asynchronous callback activity + * on a queue, such as calling the unplug function after a timeout. + * A block device may call blk_sync_queue to ensure that any + * such activity is cancelled, thus allowing it to release resources + * that the callbacks might use. The caller must already have made sure + * that its ->make_request_fn will not re-add plugging prior to calling + * this function. + * + * This function does not cancel any asynchronous activity arising + * out of elevator or throttling code. That would require elevator_exit() + * and blkcg_exit_queue() to be called with queue lock initialized. + * + */ +void blk_sync_queue(struct request_queue *q) +{ + del_timer_sync(&q->timeout); + + if (q->mq_ops) { + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + cancel_delayed_work_sync(&hctx->run_work); + cancel_delayed_work_sync(&hctx->delay_work); + } + } else { + cancel_delayed_work_sync(&q->delay_work); + } +} +EXPORT_SYMBOL(blk_sync_queue); + +/** + * __blk_run_queue_uncond - run a queue whether or not it has been stopped + * @q: The queue to run + * + * Description: + * Invoke request handling on a queue if there are any pending requests. + * May be used to restart request handling after a request has completed. + * This variant runs the queue whether or not the queue has been + * stopped. Must be called with the queue lock held and interrupts + * disabled. See also @blk_run_queue. + */ +inline void __blk_run_queue_uncond(struct request_queue *q) +{ + if (unlikely(blk_queue_dead(q))) + return; + + /* + * Some request_fn implementations, e.g. scsi_request_fn(), unlock + * the queue lock internally. As a result multiple threads may be + * running such a request function concurrently. 
Keep track of the + * number of active request_fn invocations such that blk_drain_queue() + * can wait until all these request_fn calls have finished. + */ + q->request_fn_active++; + q->request_fn(q); + q->request_fn_active--; +} + +/** + * __blk_run_queue - run a single device queue + * @q: The queue to run + * + * Description: + * See @blk_run_queue. This variant must be called with the queue lock + * held and interrupts disabled. + */ +void __blk_run_queue(struct request_queue *q) +{ + if (unlikely(blk_queue_stopped(q))) + return; + + __blk_run_queue_uncond(q); +} +EXPORT_SYMBOL(__blk_run_queue); + +/** + * blk_run_queue_async - run a single device queue in workqueue context + * @q: The queue to run + * + * Description: + * Tells kblockd to perform the equivalent of @blk_run_queue on behalf + * of us. The caller must hold the queue lock. + */ +void blk_run_queue_async(struct request_queue *q) +{ + if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q))) + mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); +} +EXPORT_SYMBOL(blk_run_queue_async); + +/** + * blk_run_queue - run a single device queue + * @q: The queue to run + * + * Description: + * Invoke request handling on this queue, if it has pending work to do. + * May be used to restart queueing when a request has completed. + */ +void blk_run_queue(struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __blk_run_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_run_queue); + +void blk_put_queue(struct request_queue *q) +{ + kobject_put(&q->kobj); +} +EXPORT_SYMBOL(blk_put_queue); + +/** + * __blk_drain_queue - drain requests from request_queue + * @q: queue to drain + * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV + * + * Drain requests from @q. If @drain_all is set, all requests are drained. + * If not, only ELVPRIV requests are drained. The caller is responsible + * for ensuring that no new requests which need to be drained are queued. + */ +static void __blk_drain_queue(struct request_queue *q, bool drain_all) + __releases(q->queue_lock) + __acquires(q->queue_lock) +{ + int i; + + lockdep_assert_held(q->queue_lock); + + while (true) { + bool drain = false; + + /* + * The caller might be trying to drain @q before its + * elevator is initialized. + */ + if (q->elevator) + elv_drain_elevator(q); + + blkcg_drain_queue(q); + + /* + * This function might be called on a queue which failed + * driver init after queue creation or is not yet fully + * active yet. Some drivers (e.g. fd and loop) get unhappy + * in such cases. Kick queue iff dispatch queue has + * something on it and @q has request_fn set. + */ + if (!list_empty(&q->queue_head) && q->request_fn) + __blk_run_queue(q); + + drain |= q->nr_rqs_elvpriv; + drain |= q->request_fn_active; + + /* + * Unfortunately, requests are queued at and tracked from + * multiple places and there's no single counter which can + * be drained. Check all the queues and counters. 
+ */ + if (drain_all) { + struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); + drain |= !list_empty(&q->queue_head); + for (i = 0; i < 2; i++) { + drain |= q->nr_rqs[i]; + drain |= q->in_flight[i]; + if (fq) + drain |= !list_empty(&fq->flush_queue[i]); + } + } + + if (!drain) + break; + + spin_unlock_irq(q->queue_lock); + + msleep(10); + + spin_lock_irq(q->queue_lock); + } + + /* + * With queue marked dead, any woken up waiter will fail the + * allocation path, so the wakeup chaining is lost and we're + * left with hung waiters. We need to wake up those waiters. + */ + if (q->request_fn) { + struct request_list *rl; + + blk_queue_for_each_rl(rl, q) + for (i = 0; i < ARRAY_SIZE(rl->wait); i++) + wake_up_all(&rl->wait[i]); + } +} + +/** + * blk_queue_bypass_start - enter queue bypass mode + * @q: queue of interest + * + * In bypass mode, only the dispatch FIFO queue of @q is used. This + * function makes @q enter bypass mode and drains all requests which were + * throttled or issued before. On return, it's guaranteed that no request + * is being throttled or has ELVPRIV set and blk_queue_bypass() %true + * inside queue or RCU read lock. + */ +void blk_queue_bypass_start(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + q->bypass_depth++; + queue_flag_set(QUEUE_FLAG_BYPASS, q); + spin_unlock_irq(q->queue_lock); + + /* + * Queues start drained. Skip actual draining till init is + * complete. This avoids lenghty delays during queue init which + * can happen many times during boot. + */ + if (blk_queue_init_done(q)) { + spin_lock_irq(q->queue_lock); + __blk_drain_queue(q, false); + spin_unlock_irq(q->queue_lock); + + /* ensure blk_queue_bypass() is %true inside RCU read lock */ + synchronize_rcu(); + } +} +EXPORT_SYMBOL_GPL(blk_queue_bypass_start); + +/** + * blk_queue_bypass_end - leave queue bypass mode + * @q: queue of interest + * + * Leave bypass mode and restore the normal queueing behavior. + */ +void blk_queue_bypass_end(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + if (!--q->bypass_depth) + queue_flag_clear(QUEUE_FLAG_BYPASS, q); + WARN_ON_ONCE(q->bypass_depth < 0); + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL_GPL(blk_queue_bypass_end); + +void blk_set_queue_dying(struct request_queue *q) +{ + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + + if (q->mq_ops) + blk_mq_wake_waiters(q); + else { + struct request_list *rl; + + blk_queue_for_each_rl(rl, q) { + if (rl->rq_pool) { + wake_up(&rl->wait[BLK_RW_SYNC]); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + } +} +EXPORT_SYMBOL_GPL(blk_set_queue_dying); + +/** + * blk_cleanup_queue - shutdown a request queue + * @q: request queue to shutdown + * + * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and + * put it. All future requests will be failed immediately with -ENODEV. + */ +void blk_cleanup_queue(struct request_queue *q) +{ + spinlock_t *lock = q->queue_lock; + + /* mark @q DYING, no new request or merges will be allowed afterwards */ + mutex_lock(&q->sysfs_lock); + blk_set_queue_dying(q); + spin_lock_irq(lock); + + /* + * A dying queue is permanently in bypass mode till released. Note + * that, unlike blk_queue_bypass_start(), we aren't performing + * synchronize_rcu() after entering bypass mode to avoid the delay + * as some drivers create and destroy a lot of queues while + * probing. This is still safe because blk_release_queue() will be + * called only after the queue refcnt drops to zero and nothing, + * RCU or not, would be traversing the queue by then. 
+ */ + q->bypass_depth++; + queue_flag_set(QUEUE_FLAG_BYPASS, q); + + queue_flag_set(QUEUE_FLAG_NOMERGES, q); + queue_flag_set(QUEUE_FLAG_NOXMERGES, q); + queue_flag_set(QUEUE_FLAG_DYING, q); + spin_unlock_irq(lock); + mutex_unlock(&q->sysfs_lock); + + /* + * Drain all requests queued before DYING marking. Set DEAD flag to + * prevent that q->request_fn() gets invoked after draining finished. + */ + if (q->mq_ops) { + blk_mq_freeze_queue(q); + spin_lock_irq(lock); + } else { + spin_lock_irq(lock); + __blk_drain_queue(q, true); + } + queue_flag_set(QUEUE_FLAG_DEAD, q); + spin_unlock_irq(lock); + + /* @q won't process any more request, flush async actions */ + del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); + blk_sync_queue(q); + + if (q->mq_ops) + blk_mq_free_queue(q); + + spin_lock_irq(lock); + if (q->queue_lock != &q->__queue_lock) + q->queue_lock = &q->__queue_lock; + spin_unlock_irq(lock); + + bdi_destroy(&q->backing_dev_info); + + /* @q is and will stay empty, shutdown and put */ + blk_put_queue(q); +} +EXPORT_SYMBOL(blk_cleanup_queue); + +/* Allocate memory local to the request queue */ +static void *alloc_request_struct(gfp_t gfp_mask, void *data) +{ + int nid = (int)(long)data; + return kmem_cache_alloc_node(request_cachep, gfp_mask, nid); +} + +static void free_request_struct(void *element, void *unused) +{ + kmem_cache_free(request_cachep, element); +} + +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask) +{ + if (unlikely(rl->rq_pool)) + return 0; + + rl->q = q; + rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; + rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; + init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); + init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); + + rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct, + free_request_struct, + (void *)(long)q->node, gfp_mask, + q->node); + if (!rl->rq_pool) + return -ENOMEM; + + return 0; +} + +void blk_exit_rl(struct request_list *rl) +{ + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); +} + +struct request_queue *blk_alloc_queue(gfp_t gfp_mask) +{ + return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); +} +EXPORT_SYMBOL(blk_alloc_queue); + +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +{ + struct request_queue *q; + int err; + + q = kmem_cache_alloc_node(blk_requestq_cachep, + gfp_mask | __GFP_ZERO, node_id); + if (!q) + return NULL; + + q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); + if (q->id < 0) + goto fail_q; + + q->backing_dev_info.ra_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + q->backing_dev_info.state = 0; + q->backing_dev_info.capabilities = 0; + q->backing_dev_info.name = "block"; + q->node = node_id; + + err = bdi_init(&q->backing_dev_info); + if (err) + goto fail_id; + + setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, + laptop_mode_timer_fn, (unsigned long) q); + setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); + INIT_LIST_HEAD(&q->queue_head); + INIT_LIST_HEAD(&q->timeout_list); + INIT_LIST_HEAD(&q->icq_list); +#ifdef CONFIG_BLK_CGROUP + INIT_LIST_HEAD(&q->blkg_list); +#endif + INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); + + kobject_init(&q->kobj, &blk_queue_ktype); + + mutex_init(&q->sysfs_lock); + spin_lock_init(&q->__queue_lock); + + /* + * By default initialize queue_lock to internal lock and driver can + * override it later if need be. 
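+ *
+ * For example, a driver that needs its own lock typically lets
+ * blk_init_queue() or blk_init_allocated_queue() install it, as in this
+ * hypothetical sketch where "my_lock" and "my_request_fn" are the
+ * driver's own:
+ *
+ *	spin_lock_init(&my_lock);
+ *	q = blk_init_queue(my_request_fn, &my_lock);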
+ */ + q->queue_lock = &q->__queue_lock; + + /* + * A queue starts its life with bypass turned on to avoid + * unnecessary bypass on/off overhead and nasty surprises during + * init. The initial bypass will be finished when the queue is + * registered by blk_register_queue(). + */ + q->bypass_depth = 1; + __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + + init_waitqueue_head(&q->mq_freeze_wq); + + if (blkcg_init_queue(q)) + goto fail_bdi; + + return q; + +fail_bdi: + bdi_destroy(&q->backing_dev_info); +fail_id: + ida_simple_remove(&blk_queue_ida, q->id); +fail_q: + kmem_cache_free(blk_requestq_cachep, q); + return NULL; +} +EXPORT_SYMBOL(blk_alloc_queue_node); + +/** + * blk_init_queue - prepare a request queue for use with a block device + * @rfn: The function to be called to process requests that have been + * placed on the queue. + * @lock: Request queue spin lock + * + * Description: + * If a block device wishes to use the standard request handling procedures, + * which sorts requests and coalesces adjacent requests, then it must + * call blk_init_queue(). The function @rfn will be called when there + * are requests on the queue that need to be processed. If the device + * supports plugging, then @rfn may not be called immediately when requests + * are available on the queue, but may be called at some time later instead. + * Plugged queues are generally unplugged when a buffer belonging to one + * of the requests on the queue is needed, or due to memory pressure. + * + * @rfn is not required, or even expected, to remove all requests off the + * queue, but only as many as it can handle at a time. If it does leave + * requests on the queue, it is responsible for arranging that the requests + * get dealt with eventually. + * + * The queue spin lock must be held while manipulating the requests on the + * request queue; this lock will be taken also from interrupt context, so irq + * disabling is needed for it. + * + * Function returns a pointer to the initialized request queue, or %NULL if + * it didn't succeed. + * + * Note: + * blk_init_queue() must be paired with a blk_cleanup_queue() call + * when the block device is deactivated (such as at module unload). 
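+ *
+ * A minimal caller might look like this (illustrative sketch only; the
+ * request handler and lock names are hypothetical):
+ *
+ *	spin_lock_init(&my_lock);
+ *	q = blk_init_queue(my_request_fn, &my_lock);
+ *	if (!q)
+ *		return -ENOMEM;
+ *	...
+ *	blk_cleanup_queue(q);	(when the device goes away)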
+ **/ + +struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) +{ + return blk_init_queue_node(rfn, lock, NUMA_NO_NODE); +} +EXPORT_SYMBOL(blk_init_queue); + +struct request_queue * +blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) +{ + struct request_queue *uninit_q, *q; + + uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); + if (!uninit_q) + return NULL; + + q = blk_init_allocated_queue(uninit_q, rfn, lock); + if (!q) + blk_cleanup_queue(uninit_q); + + return q; +} +EXPORT_SYMBOL(blk_init_queue_node); + +static void blk_queue_bio(struct request_queue *q, struct bio *bio); + +struct request_queue * +blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, + spinlock_t *lock) +{ + if (!q) + return NULL; + + q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0); + if (!q->fq) + return NULL; + + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) + goto fail; + + q->request_fn = rfn; + q->prep_rq_fn = NULL; + q->unprep_rq_fn = NULL; + q->queue_flags |= QUEUE_FLAG_DEFAULT; + + /* Override internal queue lock with supplied lock pointer */ + if (lock) + q->queue_lock = lock; + + /* + * This also sets hw/phys segments, boundary and size + */ + blk_queue_make_request(q, blk_queue_bio); + + q->sg_reserved_size = INT_MAX; + + /* Protect q->elevator from elevator_change */ + mutex_lock(&q->sysfs_lock); + + /* init elevator */ + if (elevator_init(q, NULL)) { + mutex_unlock(&q->sysfs_lock); + goto fail; + } + + mutex_unlock(&q->sysfs_lock); + + return q; + +fail: + blk_free_flush_queue(q->fq); + return NULL; +} +EXPORT_SYMBOL(blk_init_allocated_queue); + +bool blk_get_queue(struct request_queue *q) +{ + if (likely(!blk_queue_dying(q))) { + __blk_get_queue(q); + return true; + } + + return false; +} +EXPORT_SYMBOL(blk_get_queue); + +static inline void blk_free_request(struct request_list *rl, struct request *rq) +{ + if (rq->cmd_flags & REQ_ELVPRIV) { + elv_put_request(rl->q, rq); + if (rq->elv.icq) + put_io_context(rq->elv.icq->ioc); + } + + mempool_free(rq, rl->rq_pool); +} + +/* + * ioc_batching returns true if the ioc is a valid batching request and + * should be given priority access to a request. + */ +static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) +{ + if (!ioc) + return 0; + + /* + * Make sure the process is able to allocate at least 1 request + * even if the batch times out, otherwise we could theoretically + * lose wakeups. + */ + return ioc->nr_batch_requests == q->nr_batching || + (ioc->nr_batch_requests > 0 + && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); +} + +/* + * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This + * will cause the process to be a "batcher" on all queues in the system. This + * is the behaviour we want though - once it gets a wakeup it should be given + * a nice run. + */ +static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) +{ + if (!ioc || ioc_batching(q, ioc)) + return; + + ioc->nr_batch_requests = q->nr_batching; + ioc->last_waited = jiffies; +} + +static void __freed_request(struct request_list *rl, int sync) +{ + struct request_queue *q = rl->q; + + /* + * bdi isn't aware of blkcg yet. As all async IOs end up root + * blkcg anyway, just use root blkcg state. 
+ */ + if (rl == &q->root_rl && + rl->count[sync] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, sync); + + if (rl->count[sync] + 1 <= q->nr_requests) { + if (waitqueue_active(&rl->wait[sync])) + wake_up(&rl->wait[sync]); + + blk_clear_rl_full(rl, sync); + } +} + +/* + * A request has just been released. Account for it, update the full and + * congestion status, wake up any waiters. Called under q->queue_lock. + */ +static void freed_request(struct request_list *rl, unsigned int flags) +{ + struct request_queue *q = rl->q; + int sync = rw_is_sync(flags); + + q->nr_rqs[sync]--; + rl->count[sync]--; + if (flags & REQ_ELVPRIV) + q->nr_rqs_elvpriv--; + + __freed_request(rl, sync); + + if (unlikely(rl->starved[sync ^ 1])) + __freed_request(rl, sync ^ 1); +} + +int blk_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct request_list *rl; + + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; + blk_queue_congestion_threshold(q); + + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_SYNC); + else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_SYNC); + + if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_ASYNC); + else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_ASYNC); + + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + + spin_unlock_irq(q->queue_lock); + return 0; +} + +/* + * Determine if elevator data should be initialized when allocating the + * request associated with @bio. + */ +static bool blk_rq_should_init_elevator(struct bio *bio) +{ + if (!bio) + return true; + + /* + * Flush requests do not use the elevator so skip initialization. + * This allows a request to share the flush and elevator data. + */ + if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) + return false; + + return true; +} + +/** + * rq_ioc - determine io_context for request allocation + * @bio: request being allocated is for this bio (can be %NULL) + * + * Determine io_context to use for request allocation for @bio. May return + * %NULL if %current->io_context doesn't exist. + */ +static struct io_context *rq_ioc(struct bio *bio) +{ +#ifdef CONFIG_BLK_CGROUP + if (bio && bio->bi_ioc) + return bio->bi_ioc; +#endif + return current->io_context; +} + +/** + * __get_request - get a free request + * @rl: request list to allocate from + * @rw_flags: RW and SYNC flags + * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask + * + * Get a free request from @q. This function may fail under memory + * pressure or if @q is dead. + * + * Must be called with @q->queue_lock held and, + * Returns ERR_PTR on failure, with @q->queue_lock held. + * Returns request pointer on success, with @q->queue_lock *not held*. 
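+ *
+ * The asymmetric locking contract means callers look roughly like this
+ * (simplified from get_request() and blk_old_get_request() below):
+ *
+ *	spin_lock_irq(q->queue_lock);
+ *	rq = __get_request(rl, rw_flags, bio, gfp_mask);
+ *	if (IS_ERR(rq)) {
+ *		spin_unlock_irq(q->queue_lock);
+ *		return rq;
+ *	}
+ *	(on success the queue_lock has already been released)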
+ */ +static struct request *__get_request(struct request_list *rl, int rw_flags, + struct bio *bio, gfp_t gfp_mask) +{ + struct request_queue *q = rl->q; + struct request *rq; + struct elevator_type *et = q->elevator->type; + struct io_context *ioc = rq_ioc(bio); + struct io_cq *icq = NULL; + const bool is_sync = rw_is_sync(rw_flags) != 0; + int may_queue; + + if (unlikely(blk_queue_dying(q))) + return ERR_PTR(-ENODEV); + + may_queue = elv_may_queue(q, rw_flags); + if (may_queue == ELV_MQUEUE_NO) + goto rq_starved; + + if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { + if (rl->count[is_sync]+1 >= q->nr_requests) { + /* + * The queue will fill after this allocation, so set + * it as full, and mark this process as "batching". + * This process will be allowed to complete a batch of + * requests, others will be blocked. + */ + if (!blk_rl_full(rl, is_sync)) { + ioc_set_batching(q, ioc); + blk_set_rl_full(rl, is_sync); + } else { + if (may_queue != ELV_MQUEUE_MUST + && !ioc_batching(q, ioc)) { + /* + * The queue is full and the allocating + * process is not a "batcher", and not + * exempted by the IO scheduler + */ + return ERR_PTR(-ENOMEM); + } + } + } + /* + * bdi isn't aware of blkcg yet. As all async IOs end up + * root blkcg anyway, just use root blkcg state. + */ + if (rl == &q->root_rl) + blk_set_queue_congested(q, is_sync); + } + + /* + * Only allow batching queuers to allocate up to 50% over the defined + * limit of requests, otherwise we could have thousands of requests + * allocated with any setting of ->nr_requests + */ + if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) + return ERR_PTR(-ENOMEM); + + q->nr_rqs[is_sync]++; + rl->count[is_sync]++; + rl->starved[is_sync] = 0; + + /* + * Decide whether the new request will be managed by elevator. If + * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will + * prevent the current elevator from being destroyed until the new + * request is freed. This guarantees icq's won't be destroyed and + * makes creating new ones safe. + * + * Also, lookup icq while holding queue_lock. If it doesn't exist, + * it will be created after releasing queue_lock. + */ + if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { + rw_flags |= REQ_ELVPRIV; + q->nr_rqs_elvpriv++; + if (et->icq_cache && ioc) + icq = ioc_lookup_icq(ioc, q); + } + + if (blk_queue_io_stat(q)) + rw_flags |= REQ_IO_STAT; + spin_unlock_irq(q->queue_lock); + + /* allocate and init request */ + rq = mempool_alloc(rl->rq_pool, gfp_mask); + if (!rq) + goto fail_alloc; + + blk_rq_init(q, rq); + blk_rq_set_rl(rq, rl); + rq->cmd_flags = rw_flags | REQ_ALLOCED; + + /* init elvpriv */ + if (rw_flags & REQ_ELVPRIV) { + if (unlikely(et->icq_cache && !icq)) { + if (ioc) + icq = ioc_create_icq(ioc, q, gfp_mask); + if (!icq) + goto fail_elvpriv; + } + + rq->elv.icq = icq; + if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) + goto fail_elvpriv; + + /* @rq->elv.icq holds io_context until @rq is freed */ + if (icq) + get_io_context(icq->ioc); + } +out: + /* + * ioc may be NULL here, and ioc_batching will be false. That's + * OK, if the queue is under the request limit then requests need + * not count toward the nr_batch_requests limit. There will always + * be some limit enforced by BLK_BATCH_TIME. + */ + if (ioc_batching(q, ioc)) + ioc->nr_batch_requests--; + + trace_block_getrq(q, bio, rw_flags & 1); + return rq; + +fail_elvpriv: + /* + * elvpriv init failed. 
ioc, icq and elvpriv aren't mempool backed + * and may fail indefinitely under memory pressure and thus + * shouldn't stall IO. Treat this request as !elvpriv. This will + * disturb iosched and blkcg but weird is bettern than dead. + */ + printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n", + __func__, dev_name(q->backing_dev_info.dev)); + + rq->cmd_flags &= ~REQ_ELVPRIV; + rq->elv.icq = NULL; + + spin_lock_irq(q->queue_lock); + q->nr_rqs_elvpriv--; + spin_unlock_irq(q->queue_lock); + goto out; + +fail_alloc: + /* + * Allocation failed presumably due to memory. Undo anything we + * might have messed up. + * + * Allocating task should really be put onto the front of the wait + * queue, but this is pretty rare. + */ + spin_lock_irq(q->queue_lock); + freed_request(rl, rw_flags); + + /* + * in the very unlikely event that allocation failed and no + * requests for this direction was pending, mark us starved so that + * freeing of a request in the other direction will notice + * us. another possible fix would be to split the rq mempool into + * READ and WRITE + */ +rq_starved: + if (unlikely(rl->count[is_sync] == 0)) + rl->starved[is_sync] = 1; + return ERR_PTR(-ENOMEM); +} + +/** + * get_request - get a free request + * @q: request_queue to allocate request from + * @rw_flags: RW and SYNC flags + * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask + * + * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this + * function keeps retrying under memory pressure and fails iff @q is dead. + * + * Must be called with @q->queue_lock held and, + * Returns ERR_PTR on failure, with @q->queue_lock held. + * Returns request pointer on success, with @q->queue_lock *not held*. + */ +static struct request *get_request(struct request_queue *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) +{ + const bool is_sync = rw_is_sync(rw_flags) != 0; + DEFINE_WAIT(wait); + struct request_list *rl; + struct request *rq; + + rl = blk_get_rl(q, bio); /* transferred to @rq on success */ +retry: + rq = __get_request(rl, rw_flags, bio, gfp_mask); + if (!IS_ERR(rq)) + return rq; + + if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { + blk_put_rl(rl); + return rq; + } + + /* wait on @rl and retry */ + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, + TASK_UNINTERRUPTIBLE); + + trace_block_sleeprq(q, bio, rw_flags & 1); + + spin_unlock_irq(q->queue_lock); + io_schedule(); + + /* + * After sleeping, we become a "batching" process and will be able + * to allocate at least one request, and up to a big batch of them + * for a small period time. 
See ioc_batching, ioc_set_batching + */ + ioc_set_batching(q, current->io_context); + + spin_lock_irq(q->queue_lock); + finish_wait(&rl->wait[is_sync], &wait); + + goto retry; +} + +static struct request *blk_old_get_request(struct request_queue *q, int rw, + gfp_t gfp_mask) +{ + struct request *rq; + + BUG_ON(rw != READ && rw != WRITE); + + /* create ioc upfront */ + create_io_context(gfp_mask, q->node); + + spin_lock_irq(q->queue_lock); + rq = get_request(q, rw, NULL, gfp_mask); + if (IS_ERR(rq)) + spin_unlock_irq(q->queue_lock); + /* q->queue_lock is unlocked at this point */ + + return rq; +} + +struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) +{ + if (q->mq_ops) + return blk_mq_alloc_request(q, rw, gfp_mask, false); + else + return blk_old_get_request(q, rw, gfp_mask); +} +EXPORT_SYMBOL(blk_get_request); + +/** + * blk_make_request - given a bio, allocate a corresponding struct request. + * @q: target request queue + * @bio: The bio describing the memory mappings that will be submitted for IO. + * It may be a chained-bio properly constructed by block/bio layer. + * @gfp_mask: gfp flags to be used for memory allocation + * + * blk_make_request is the parallel of generic_make_request for BLOCK_PC + * type commands. Where the struct request needs to be farther initialized by + * the caller. It is passed a &struct bio, which describes the memory info of + * the I/O transfer. + * + * The caller of blk_make_request must make sure that bi_io_vec + * are set to describe the memory buffers. That bio_data_dir() will return + * the needed direction of the request. (And all bio's in the passed bio-chain + * are properly set accordingly) + * + * If called under none-sleepable conditions, mapped bio buffers must not + * need bouncing, by calling the appropriate masked or flagged allocator, + * suitable for the target device. Otherwise the call to blk_queue_bounce will + * BUG. + * + * WARNING: When allocating/cloning a bio-chain, careful consideration should be + * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for + * anything but the first bio in the chain. Otherwise you risk waiting for IO + * completion of a bio that hasn't been submitted yet, thus resulting in a + * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead + * of bio_alloc(), as that avoids the mempool deadlock. + * If possible a big IO should be split into smaller parts when allocation + * fails. Partial allocation should not be an error, or you risk a live-lock. 
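+ *
+ * A hypothetical BLOCK_PC submitter would use it roughly as follows
+ * (sketch only; command setup and error handling are abbreviated):
+ *
+ *	rq = blk_make_request(q, bio, GFP_KERNEL);
+ *	if (IS_ERR(rq))
+ *		return PTR_ERR(rq);
+ *	... fill in rq->cmd and rq->cmd_len, then execute the request ...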
+ */ +struct request *blk_make_request(struct request_queue *q, struct bio *bio, + gfp_t gfp_mask) +{ + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + + if (IS_ERR(rq)) + return rq; + + blk_rq_set_block_pc(rq); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + int ret; + + blk_queue_bounce(q, &bounce_bio); + ret = blk_rq_append_bio(q, rq, bounce_bio); + if (unlikely(ret)) { + blk_put_request(rq); + return ERR_PTR(ret); + } + } + + return rq; +} +EXPORT_SYMBOL(blk_make_request); + +/** + * blk_rq_set_block_pc - initialize a request to type BLOCK_PC + * @rq: request to be initialized + * + */ +void blk_rq_set_block_pc(struct request *rq) +{ + rq->cmd_type = REQ_TYPE_BLOCK_PC; + rq->__data_len = 0; + rq->__sector = (sector_t) -1; + rq->bio = rq->biotail = NULL; + memset(rq->__cmd, 0, sizeof(rq->__cmd)); +} +EXPORT_SYMBOL(blk_rq_set_block_pc); + +/** + * blk_requeue_request - put a request back on queue + * @q: request queue where request should be inserted + * @rq: request to be inserted + * + * Description: + * Drivers often keep queueing requests until the hardware cannot accept + * more, when that condition happens we need to put the request back + * on the queue. Must be called with queue lock held. + */ +void blk_requeue_request(struct request_queue *q, struct request *rq) +{ + blk_delete_timer(rq); + blk_clear_rq_complete(rq); + trace_block_rq_requeue(q, rq); + + if (rq->cmd_flags & REQ_QUEUED) + blk_queue_end_tag(q, rq); + + BUG_ON(blk_queued_rq(rq)); + + elv_requeue_request(q, rq); +} +EXPORT_SYMBOL(blk_requeue_request); + +static void add_acct_request(struct request_queue *q, struct request *rq, + int where) +{ + blk_account_io_start(rq, true); + __elv_add_request(q, rq, where); +} + +static void part_round_stats_single(int cpu, struct hd_struct *part, + unsigned long now) +{ + int inflight; + + if (now == part->stamp) + return; + + inflight = part_in_flight(part); + if (inflight) { + __part_stat_add(cpu, part, time_in_queue, + inflight * (now - part->stamp)); + __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); + } + part->stamp = now; +} + +/** + * part_round_stats() - Round off the performance stats on a struct disk_stats. + * @cpu: cpu number for stats access + * @part: target partition + * + * The average IO queue length and utilisation statistics are maintained + * by observing the current state of the queue length and the amount of + * time it has been in this state for. + * + * Normally, that accounting is done on IO completion, but that can result + * in more than a second's worth of IO being accounted for within any one + * second, leading to >100% utilisation. To deal with that, we call this + * function to do a round-off before returning the results when reading + * /proc/diskstats. This accounts immediately for all queue usage up to + * the current jiffies and restarts the counters again. 
+ */ +void part_round_stats(int cpu, struct hd_struct *part) +{ + unsigned long now = jiffies; + + if (part->partno) + part_round_stats_single(cpu, &part_to_disk(part)->part0, now); + part_round_stats_single(cpu, part, now); +} +EXPORT_SYMBOL_GPL(part_round_stats); + +#ifdef CONFIG_PM +static void blk_pm_put_request(struct request *rq) +{ + if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) + pm_runtime_mark_last_busy(rq->q->dev); +} +#else +static inline void blk_pm_put_request(struct request *rq) {} +#endif + +/* + * queue lock must be held + */ +void __blk_put_request(struct request_queue *q, struct request *req) +{ + if (unlikely(!q)) + return; + + if (q->mq_ops) { + blk_mq_free_request(req); + return; + } + + blk_pm_put_request(req); + + elv_completed_request(q, req); + + /* this is a bio leak */ + WARN_ON(req->bio != NULL); + + /* + * Request may not have originated from ll_rw_blk. if not, + * it didn't come out of our reserved rq pools + */ + if (req->cmd_flags & REQ_ALLOCED) { + unsigned int flags = req->cmd_flags; + struct request_list *rl = blk_rq_rl(req); + + BUG_ON(!list_empty(&req->queuelist)); + BUG_ON(ELV_ON_HASH(req)); + + blk_free_request(rl, req); + freed_request(rl, flags); + blk_put_rl(rl); + } +} +EXPORT_SYMBOL_GPL(__blk_put_request); + +void blk_put_request(struct request *req) +{ + struct request_queue *q = req->q; + + if (q->mq_ops) + blk_mq_free_request(req); + else { + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __blk_put_request(q, req); + spin_unlock_irqrestore(q->queue_lock, flags); + } +} +EXPORT_SYMBOL(blk_put_request); + +/** + * blk_add_request_payload - add a payload to a request + * @rq: request to update + * @page: page backing the payload + * @len: length of the payload. + * + * This allows to later add a payload to an already submitted request by + * a block driver. The driver needs to take care of freeing the payload + * itself. + * + * Note that this is a quite horrible hack and nothing but handling of + * discard requests should ever use it. 
+ */ +void blk_add_request_payload(struct request *rq, struct page *page, + unsigned int len) +{ + struct bio *bio = rq->bio; + + bio->bi_io_vec->bv_page = page; + bio->bi_io_vec->bv_offset = 0; + bio->bi_io_vec->bv_len = len; + + bio->bi_iter.bi_size = len; + bio->bi_vcnt = 1; + bio->bi_phys_segments = 1; + + rq->__data_len = rq->resid_len = len; + rq->nr_phys_segments = 1; +} +EXPORT_SYMBOL_GPL(blk_add_request_payload); + +bool bio_attempt_back_merge(struct request_queue *q, struct request *req, + struct bio *bio) +{ + const int ff = bio->bi_rw & REQ_FAILFAST_MASK; + + if (!ll_back_merge_fn(q, req, bio)) + return false; + + trace_block_bio_backmerge(q, req, bio); + + if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) + blk_rq_set_mixed_merge(req); + + req->biotail->bi_next = bio; + req->biotail = bio; + req->__data_len += bio->bi_iter.bi_size; + req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); + + blk_account_io_start(req, false); + return true; +} + +bool bio_attempt_front_merge(struct request_queue *q, struct request *req, + struct bio *bio) +{ + const int ff = bio->bi_rw & REQ_FAILFAST_MASK; + + if (!ll_front_merge_fn(q, req, bio)) + return false; + + trace_block_bio_frontmerge(q, req, bio); + + if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) + blk_rq_set_mixed_merge(req); + + bio->bi_next = req->bio; + req->bio = bio; + + req->__sector = bio->bi_iter.bi_sector; + req->__data_len += bio->bi_iter.bi_size; + req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); + + blk_account_io_start(req, false); + return true; +} + +/** + * blk_attempt_plug_merge - try to merge with %current's plugged list + * @q: request_queue new bio is being queued at + * @bio: new bio being queued + * @request_count: out parameter for number of traversed plugged requests + * + * Determine whether @bio being queued on @q can be merged with a request + * on %current's plugged list. Returns %true if merge was successful, + * otherwise %false. + * + * Plugging coalesces IOs from the same issuer for the same purpose without + * going through @q->queue_lock. As such it's more of an issuing mechanism + * than scheduling, and the request, while may have elvpriv data, is not + * added on the elevator at this point. In addition, we don't have + * reliable access to the elevator outside queue lock. Only check basic + * merging parameters without querying the elevator. + * + * Caller must ensure !blk_queue_nomerges(q) beforehand. 
+ */ +bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int *request_count) +{ + struct blk_plug *plug; + struct request *rq; + bool ret = false; + struct list_head *plug_list; + + plug = current->plug; + if (!plug) + goto out; + *request_count = 0; + + if (q->mq_ops) + plug_list = &plug->mq_list; + else + plug_list = &plug->list; + + list_for_each_entry_reverse(rq, plug_list, queuelist) { + int el_ret; + + if (rq->q == q) + (*request_count)++; + + if (rq->q != q || !blk_rq_merge_ok(rq, bio)) + continue; + + el_ret = blk_try_merge(rq, bio); + if (el_ret == ELEVATOR_BACK_MERGE) { + ret = bio_attempt_back_merge(q, rq, bio); + if (ret) + break; + } else if (el_ret == ELEVATOR_FRONT_MERGE) { + ret = bio_attempt_front_merge(q, rq, bio); + if (ret) + break; + } + } +out: + return ret; +} + +void init_request_from_bio(struct request *req, struct bio *bio) +{ + req->cmd_type = REQ_TYPE_FS; + + req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; + if (bio->bi_rw & REQ_RAHEAD) + req->cmd_flags |= REQ_FAILFAST_MASK; + + req->errors = 0; + req->__sector = bio->bi_iter.bi_sector; + req->ioprio = bio_prio(bio); + blk_rq_bio_prep(req->q, req, bio); +} + +static void blk_queue_bio(struct request_queue *q, struct bio *bio) +{ + const bool sync = !!(bio->bi_rw & REQ_SYNC); + struct blk_plug *plug; + int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; + struct request *req; + unsigned int request_count = 0; + + /* + * low level driver can indicate that it wants pages above a + * certain limit bounced to low memory (ie for highmem, or even + * ISA dma in theory) + */ + blk_queue_bounce(q, &bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + + if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { + spin_lock_irq(q->queue_lock); + where = ELEVATOR_INSERT_FLUSH; + goto get_rq; + } + + /* + * Check if we can merge with the plugged list before grabbing + * any locks. + */ + if (!blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) + return; + + spin_lock_irq(q->queue_lock); + + el_ret = elv_merge(q, &req, bio); + if (el_ret == ELEVATOR_BACK_MERGE) { + if (bio_attempt_back_merge(q, req, bio)) { + elv_bio_merged(q, req, bio); + if (!attempt_back_merge(q, req)) + elv_merged_request(q, req, el_ret); + goto out_unlock; + } + } else if (el_ret == ELEVATOR_FRONT_MERGE) { + if (bio_attempt_front_merge(q, req, bio)) { + elv_bio_merged(q, req, bio); + if (!attempt_front_merge(q, req)) + elv_merged_request(q, req, el_ret); + goto out_unlock; + } + } + +get_rq: + /* + * This sync check and mask will be re-done in init_request_from_bio(), + * but we need to set it earlier to expose the sync flag to the + * rq allocator and io schedulers. + */ + rw_flags = bio_data_dir(bio); + if (sync) + rw_flags |= REQ_SYNC; + + /* + * Grab a free request. This is might sleep but can not fail. + * Returns with the queue unlocked. + */ + req = get_request(q, rw_flags, bio, GFP_NOIO); + if (IS_ERR(req)) { + bio_endio(bio, PTR_ERR(req)); /* @q is dead */ + goto out_unlock; + } + + /* + * After dropping the lock and possibly sleeping here, our request + * may now be mergeable after it had proven unmergeable (above). + * We don't worry about that case for efficiency. It won't happen + * often, and the elevators are able to handle it. 
+ */ + init_request_from_bio(req, bio); + + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) + req->cpu = raw_smp_processor_id(); + + plug = current->plug; + if (plug) { + /* + * If this is the first request added after a plug, fire + * of a plug trace. + */ + if (!request_count) + trace_block_plug(q); + else { + if (request_count >= BLK_MAX_REQUEST_COUNT) { + blk_flush_plug_list(plug, false); + trace_block_plug(q); + } + } + list_add_tail(&req->queuelist, &plug->list); + blk_account_io_start(req, true); + } else { + spin_lock_irq(q->queue_lock); + add_acct_request(q, req, where); + __blk_run_queue(q); +out_unlock: + spin_unlock_irq(q->queue_lock); + } +} + +/* + * If bio->bi_dev is a partition, remap the location + */ +static inline void blk_partition_remap(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + + if (bio_sectors(bio) && bdev != bdev->bd_contains) { + struct hd_struct *p = bdev->bd_part; + + bio->bi_iter.bi_sector += p->start_sect; + bio->bi_bdev = bdev->bd_contains; + + trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, + bdev->bd_dev, + bio->bi_iter.bi_sector - p->start_sect); + } +} + +static void handle_bad_sector(struct bio *bio) +{ + char b[BDEVNAME_SIZE]; + + printk(KERN_INFO "attempt to access beyond end of device\n"); + printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", + bdevname(bio->bi_bdev, b), + bio->bi_rw, + (unsigned long long)bio_end_sector(bio), + (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); + + set_bit(BIO_EOF, &bio->bi_flags); +} + +#ifdef CONFIG_FAIL_MAKE_REQUEST + +static DECLARE_FAULT_ATTR(fail_make_request); + +static int __init setup_fail_make_request(char *str) +{ + return setup_fault_attr(&fail_make_request, str); +} +__setup("fail_make_request=", setup_fail_make_request); + +static bool should_fail_request(struct hd_struct *part, unsigned int bytes) +{ + return part->make_it_fail && should_fail(&fail_make_request, bytes); +} + +static int __init fail_make_request_debugfs(void) +{ + struct dentry *dir = fault_create_debugfs_attr("fail_make_request", + NULL, &fail_make_request); + + return PTR_ERR_OR_ZERO(dir); +} + +late_initcall(fail_make_request_debugfs); + +#else /* CONFIG_FAIL_MAKE_REQUEST */ + +static inline bool should_fail_request(struct hd_struct *part, + unsigned int bytes) +{ + return false; +} + +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + +/* + * Check whether this bio extends beyond the end of the device. + */ +static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) +{ + sector_t maxsector; + + if (!nr_sectors) + return 0; + + /* Test device or partition size, when known. */ + maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; + if (maxsector) { + sector_t sector = bio->bi_iter.bi_sector; + + if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { + /* + * This may well happen - the kernel calls bread() + * without checking the size of the device, e.g., when + * mounting a device. 
+ */ + handle_bad_sector(bio); + return 1; + } + } + + return 0; +} + +static noinline_for_stack bool +generic_make_request_checks(struct bio *bio) +{ + struct request_queue *q; + int nr_sectors = bio_sectors(bio); + int err = -EIO; + char b[BDEVNAME_SIZE]; + struct hd_struct *part; + + might_sleep(); + + if (bio_check_eod(bio, nr_sectors)) + goto end_io; + + q = bdev_get_queue(bio->bi_bdev); + if (unlikely(!q)) { + printk(KERN_ERR + "generic_make_request: Trying to access " + "nonexistent block-device %s (%Lu)\n", + bdevname(bio->bi_bdev, b), + (long long) bio->bi_iter.bi_sector); + goto end_io; + } + + if (likely(bio_is_rw(bio) && + nr_sectors > queue_max_hw_sectors(q))) { + printk(KERN_ERR "bio too big device %s (%u > %u)\n", + bdevname(bio->bi_bdev, b), + bio_sectors(bio), + queue_max_hw_sectors(q)); + goto end_io; + } + + part = bio->bi_bdev->bd_part; + if (should_fail_request(part, bio->bi_iter.bi_size) || + should_fail_request(&part_to_disk(part)->part0, + bio->bi_iter.bi_size)) + goto end_io; + + /* + * If this device has partitions, remap block n + * of partition p to block n+start(p) of the disk. + */ + blk_partition_remap(bio); + + if (bio_check_eod(bio, nr_sectors)) + goto end_io; + + /* + * Filter flush bio's early so that make_request based + * drivers without flush support don't have to worry + * about them. + */ + if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { + bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); + if (!nr_sectors) { + err = 0; + goto end_io; + } + } + + if ((bio->bi_rw & REQ_DISCARD) && + (!blk_queue_discard(q) || + ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { + err = -EOPNOTSUPP; + goto end_io; + } + + if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) { + err = -EOPNOTSUPP; + goto end_io; + } + + /* + * Various block parts want %current->io_context and lazy ioc + * allocation ends up trading a lot of pain for a small amount of + * memory. Just allocate it upfront. This may fail and block + * layer knows how to live with it. + */ + create_io_context(GFP_ATOMIC, q->node); + + if (blk_throtl_bio(q, bio)) + return false; /* throttled, will be resubmitted later */ + + trace_block_bio_queue(q, bio); + return true; + +end_io: + bio_endio(bio, err); + return false; +} + +/** + * generic_make_request - hand a buffer to its device driver for I/O + * @bio: The bio describing the location in memory and on the device. + * + * generic_make_request() is used to make I/O requests of block + * devices. It is passed a &struct bio, which describes the I/O that needs + * to be done. + * + * generic_make_request() does not return any status. The + * success/failure status of the request, along with notification of + * completion, is delivered asynchronously through the bio->bi_end_io + * function described (one day) else where. + * + * The caller of generic_make_request must make sure that bi_io_vec + * are set to describe the memory buffer, and that bi_dev and bi_sector are + * set to describe the device address, and the + * bi_end_io and optionally bi_private are set to describe how + * completion notification should be signaled. + * + * generic_make_request and the drivers it calls may use bi_next if this + * bio happens to be merged with someone else, and may resubmit the bio to + * a lower device by calling into generic_make_request recursively, which + * means the bio should NOT be touched after the call to ->make_request_fn. 
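+ *
+ * A typical caller prepares the bio and then fires it off, e.g. (sketch;
+ * the completion handler and context are the caller's own):
+ *
+ *	bio->bi_bdev = bdev;
+ *	bio->bi_iter.bi_sector = sector;
+ *	bio->bi_end_io = my_end_io;
+ *	bio->bi_private = my_ctx;
+ *	generic_make_request(bio);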
+ */ +void generic_make_request(struct bio *bio) +{ + struct bio_list bio_list_on_stack; + + if (!generic_make_request_checks(bio)) + return; + + /* + * We only want one ->make_request_fn to be active at a time, else + * stack usage with stacked devices could be a problem. So use + * current->bio_list to keep a list of requests submited by a + * make_request_fn function. current->bio_list is also used as a + * flag to say if generic_make_request is currently active in this + * task or not. If it is NULL, then no make_request is active. If + * it is non-NULL, then a make_request is active, and new requests + * should be added at the tail + */ + if (current->bio_list) { + bio_list_add(current->bio_list, bio); + return; + } + + /* following loop may be a bit non-obvious, and so deserves some + * explanation. + * Before entering the loop, bio->bi_next is NULL (as all callers + * ensure that) so we have a list with a single bio. + * We pretend that we have just taken it off a longer list, so + * we assign bio_list to a pointer to the bio_list_on_stack, + * thus initialising the bio_list of new bios to be + * added. ->make_request() may indeed add some more bios + * through a recursive call to generic_make_request. If it + * did, we find a non-NULL value in bio_list and re-enter the loop + * from the top. In this case we really did just take the bio + * of the top of the list (no pretending) and so remove it from + * bio_list, and call into ->make_request() again. + */ + BUG_ON(bio->bi_next); + bio_list_init(&bio_list_on_stack); + current->bio_list = &bio_list_on_stack; + do { + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + + q->make_request_fn(q, bio); + + bio = bio_list_pop(current->bio_list); + } while (bio); + current->bio_list = NULL; /* deactivate */ +} +EXPORT_SYMBOL(generic_make_request); + +/** + * submit_bio - submit a bio to the block device layer for I/O + * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) + * @bio: The &struct bio which describes the I/O + * + * submit_bio() is very similar in purpose to generic_make_request(), and + * uses that function to do most of the work. Both are fairly rough + * interfaces; @bio must be presetup and ready for I/O. + * + */ +void submit_bio(int rw, struct bio *bio) +{ + bio->bi_rw |= rw; + + if (unlikely(trap_non_toi_io)) + BUG_ON(!(bio->bi_flags & BIO_TOI)); + + /* + * If it's a regular read/write or a barrier with data attached, + * go through the normal accounting stuff before submission. + */ + if (bio_has_data(bio)) { + unsigned int count; + + if (unlikely(rw & REQ_WRITE_SAME)) + count = bdev_logical_block_size(bio->bi_bdev) >> 9; + else + count = bio_sectors(bio); + + if (rw & WRITE) { + count_vm_events(PGPGOUT, count); + } else { + task_io_account_read(bio->bi_iter.bi_size); + count_vm_events(PGPGIN, count); + } + + if (unlikely(block_dump)) { + char b[BDEVNAME_SIZE]; + printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", + current->comm, task_pid_nr(current), + (rw & WRITE) ? "WRITE" : "READ", + (unsigned long long)bio->bi_iter.bi_sector, + bdevname(bio->bi_bdev, b), + count); + } + } + + generic_make_request(bio); +} +EXPORT_SYMBOL(submit_bio); + +/** + * blk_rq_check_limits - Helper function to check a request for the queue limit + * @q: the queue + * @rq: the request being checked + * + * Description: + * @rq may have been made based on weaker limitations of upper-level queues + * in request stacking drivers, and it may violate the limitation of @q. 
+ * Since the block layer and the underlying device driver trust @rq + * after it is inserted to @q, it should be checked against @q before + * the insertion using this generic function. + * + * This function should also be useful for request stacking drivers + * in some cases below, so export this function. + * Request stacking drivers like request-based dm may change the queue + * limits while requests are in the queue (e.g. dm's table swapping). + * Such request stacking drivers should check those requests against + * the new queue limits again when they dispatch those requests, + * although such checkings are also done against the old queue limits + * when submitting requests. + */ +int blk_rq_check_limits(struct request_queue *q, struct request *rq) +{ + if (!rq_mergeable(rq)) + return 0; + + if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { + printk(KERN_ERR "%s: over max size limit.\n", __func__); + return -EIO; + } + + /* + * queue's settings related to segment counting like q->bounce_pfn + * may differ from that of other stacking queues. + * Recalculate it to check the request correctly on this queue's + * limitation. + */ + blk_recalc_rq_segments(rq); + if (rq->nr_phys_segments > queue_max_segments(q)) { + printk(KERN_ERR "%s: over max segments limit.\n", __func__); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(blk_rq_check_limits); + +/** + * blk_insert_cloned_request - Helper for stacking drivers to submit a request + * @q: the queue to submit the request + * @rq: the request being queued + */ +int blk_insert_cloned_request(struct request_queue *q, struct request *rq) +{ + unsigned long flags; + int where = ELEVATOR_INSERT_BACK; + + if (blk_rq_check_limits(q, rq)) + return -EIO; + + if (rq->rq_disk && + should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) + return -EIO; + + if (q->mq_ops) { + if (blk_queue_io_stat(q)) + blk_account_io_start(rq, true); + blk_mq_insert_request(rq, false, true, true); + return 0; + } + + spin_lock_irqsave(q->queue_lock, flags); + if (unlikely(blk_queue_dying(q))) { + spin_unlock_irqrestore(q->queue_lock, flags); + return -ENODEV; + } + + /* + * Submitting request must be dequeued before calling this function + * because it will be linked to another request_queue + */ + BUG_ON(blk_queued_rq(rq)); + + if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) + where = ELEVATOR_INSERT_FLUSH; + + add_acct_request(q, rq, where); + if (where == ELEVATOR_INSERT_FLUSH) + __blk_run_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(blk_insert_cloned_request); + +/** + * blk_rq_err_bytes - determine number of bytes till the next failure boundary + * @rq: request to examine + * + * Description: + * A request could be merge of IOs which require different failure + * handling. This function determines the number of bytes which + * can be failed from the beginning of the request without + * crossing into area which need to be retried further. + * + * Return: + * The number of bytes to fail. + * + * Context: + * queue_lock must be held. + */ +unsigned int blk_rq_err_bytes(const struct request *rq) +{ + unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; + unsigned int bytes = 0; + struct bio *bio; + + if (!(rq->cmd_flags & REQ_MIXED_MERGE)) + return blk_rq_bytes(rq); + + /* + * Currently the only 'mixing' which can happen is between + * different fastfail types. 
We can safely fail portions + * which have all the failfast bits that the first one has - + * the ones which are at least as eager to fail as the first + * one. + */ + for (bio = rq->bio; bio; bio = bio->bi_next) { + if ((bio->bi_rw & ff) != ff) + break; + bytes += bio->bi_iter.bi_size; + } + + /* this could lead to infinite loop */ + BUG_ON(blk_rq_bytes(rq) && !bytes); + return bytes; +} +EXPORT_SYMBOL_GPL(blk_rq_err_bytes); + +void blk_account_io_completion(struct request *req, unsigned int bytes) +{ + if (blk_do_io_stat(req)) { + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = req->part; + part_stat_add(cpu, part, sectors[rw], bytes >> 9); + part_stat_unlock(); + } +} + +void blk_account_io_done(struct request *req) +{ + /* + * Account IO completion. flush_rq isn't accounted as a + * normal IO on queueing nor completion. Accounting the + * containing request is enough. + */ + if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { + unsigned long duration = jiffies - req->start_time; + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = req->part; + + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, ticks[rw], duration); + part_round_stats(cpu, part); + part_dec_in_flight(part, rw); + + hd_struct_put(part); + part_stat_unlock(); + } +} + +#ifdef CONFIG_PM +/* + * Don't process normal requests when queue is suspended + * or in the process of suspending/resuming + */ +static struct request *blk_pm_peek_request(struct request_queue *q, + struct request *rq) +{ + if (q->dev && (q->rpm_status == RPM_SUSPENDED || + (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) + return NULL; + else + return rq; +} +#else +static inline struct request *blk_pm_peek_request(struct request_queue *q, + struct request *rq) +{ + return rq; +} +#endif + +void blk_account_io_start(struct request *rq, bool new_io) +{ + struct hd_struct *part; + int rw = rq_data_dir(rq); + int cpu; + + if (!blk_do_io_stat(rq)) + return; + + cpu = part_stat_lock(); + + if (!new_io) { + part = rq->part; + part_stat_inc(cpu, part, merges[rw]); + } else { + part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); + if (!hd_struct_try_get(part)) { + /* + * The partition is already being removed, + * the request will be accounted on the disk only + * + * We take a reference on disk->part0 although that + * partition will never be deleted, so we can treat + * it as any other partition. + */ + part = &rq->rq_disk->part0; + hd_struct_get(part); + } + part_round_stats(cpu, part); + part_inc_in_flight(part, rw); + rq->part = part; + } + + part_stat_unlock(); +} + +/** + * blk_peek_request - peek at the top of a request queue + * @q: request queue to peek at + * + * Description: + * Return the request at the top of @q. The returned request + * should be started using blk_start_request() before LLD starts + * processing it. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_peek_request(struct request_queue *q) +{ + struct request *rq; + int ret; + + while ((rq = __elv_next_request(q)) != NULL) { + + rq = blk_pm_peek_request(q, rq); + if (!rq) + break; + + if (!(rq->cmd_flags & REQ_STARTED)) { + /* + * This is the first time the device driver + * sees this request (possibly after + * requeueing). Notify IO scheduler. 
+ */ + if (rq->cmd_flags & REQ_SORTED) + elv_activate_rq(q, rq); + + /* + * just mark as started even if we don't start + * it, a request that has been delayed should + * not be passed by new incoming requests + */ + rq->cmd_flags |= REQ_STARTED; + trace_block_rq_issue(q, rq); + } + + if (!q->boundary_rq || q->boundary_rq == rq) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = NULL; + } + + if (rq->cmd_flags & REQ_DONTPREP) + break; + + if (q->dma_drain_size && blk_rq_bytes(rq)) { + /* + * make sure space for the drain appears we + * know we can do this because max_hw_segments + * has been adjusted to be one fewer than the + * device can handle + */ + rq->nr_phys_segments++; + } + + if (!q->prep_rq_fn) + break; + + ret = q->prep_rq_fn(q, rq); + if (ret == BLKPREP_OK) { + break; + } else if (ret == BLKPREP_DEFER) { + /* + * the request may have been (partially) prepped. + * we need to keep this request in the front to + * avoid resource deadlock. REQ_STARTED will + * prevent other fs requests from passing this one. + */ + if (q->dma_drain_size && blk_rq_bytes(rq) && + !(rq->cmd_flags & REQ_DONTPREP)) { + /* + * remove the space for the drain we added + * so that we don't add it again + */ + --rq->nr_phys_segments; + } + + rq = NULL; + break; + } else if (ret == BLKPREP_KILL) { + rq->cmd_flags |= REQ_QUIET; + /* + * Mark this request as started so we don't trigger + * any debug logic in the end I/O path. + */ + blk_start_request(rq); + __blk_end_request_all(rq, -EIO); + } else { + printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); + break; + } + } + + return rq; +} +EXPORT_SYMBOL(blk_peek_request); + +void blk_dequeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + BUG_ON(list_empty(&rq->queuelist)); + BUG_ON(ELV_ON_HASH(rq)); + + list_del_init(&rq->queuelist); + + /* + * the time frame between a request being removed from the lists + * and to it is freed is accounted as io that is in progress at + * the driver side. + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]++; + set_io_start_time_ns(rq); + } +} + +/** + * blk_start_request - start request processing on the driver + * @req: request to dequeue + * + * Description: + * Dequeue @req and start timeout timer on it. This hands off the + * request to the driver. + * + * Block internal functions which don't want to start timer should + * call blk_dequeue_request(). + * + * Context: + * queue_lock must be held. + */ +void blk_start_request(struct request *req) +{ + blk_dequeue_request(req); + + /* + * We are now handing the request to the hardware, initialize + * resid_len to full count and add the timeout handler. + */ + req->resid_len = blk_rq_bytes(req); + if (unlikely(blk_bidi_rq(req))) + req->next_rq->resid_len = blk_rq_bytes(req->next_rq); + + BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); + blk_add_timer(req); +} +EXPORT_SYMBOL(blk_start_request); + +/** + * blk_fetch_request - fetch a request from a request queue + * @q: request queue to fetch a request from + * + * Description: + * Return the request at the top of @q. The request is started on + * return and LLD can start processing it immediately. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. 
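+ *
+ * Example (illustrative sketch; my_request_fn and my_xfer are placeholders
+ * for a trivial synchronous driver):
+ *
+ *	static void my_request_fn(struct request_queue *q)
+ *	{
+ *		struct request *rq;
+ *
+ *		while ((rq = blk_fetch_request(q)) != NULL) {
+ *			int err = my_xfer(rq);
+ *			__blk_end_request_all(rq, err);
+ *		}
+ *	}
+ *
+ * The request_fn is invoked with queue_lock held, so the __blk_end
+ * variants can be used directly.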
+ */ +struct request *blk_fetch_request(struct request_queue *q) +{ + struct request *rq; + + rq = blk_peek_request(q); + if (rq) + blk_start_request(rq); + return rq; +} +EXPORT_SYMBOL(blk_fetch_request); + +/** + * blk_update_request - Special helper function for request stacking drivers + * @req: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @req + * + * Description: + * Ends I/O on a number of bytes attached to @req, but doesn't complete + * the request structure even if @req doesn't have leftover. + * If @req has leftover, sets it up for the next range of segments. + * + * This special helper function is only for request stacking drivers + * (e.g. request-based dm) so that they can handle partial completion. + * Actual device drivers should use blk_end_request instead. + * + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees + * %false return from this function. + * + * Return: + * %false - this request doesn't have any more data + * %true - this request has more data + **/ +bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) +{ + int total_bytes; + + trace_block_rq_complete(req->q, req, nr_bytes); + + if (!req->bio) + return false; + + /* + * For fs requests, rq is just carrier of independent bio's + * and each partial completion should be handled separately. + * Reset per-request error on each partial completion. + * + * TODO: tj: This is too subtle. It would be better to let + * low level drivers do what they see fit. + */ + if (req->cmd_type == REQ_TYPE_FS) + req->errors = 0; + + if (error && req->cmd_type == REQ_TYPE_FS && + !(req->cmd_flags & REQ_QUIET)) { + char *error_type; + + switch (error) { + case -ENOLINK: + error_type = "recoverable transport"; + break; + case -EREMOTEIO: + error_type = "critical target"; + break; + case -EBADE: + error_type = "critical nexus"; + break; + case -ETIMEDOUT: + error_type = "timeout"; + break; + case -ENOSPC: + error_type = "critical space allocation"; + break; + case -ENODATA: + error_type = "critical medium"; + break; + case -EIO: + default: + error_type = "I/O"; + break; + } + printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", + __func__, error_type, req->rq_disk ? + req->rq_disk->disk_name : "?", + (unsigned long long)blk_rq_pos(req)); + + } + + blk_account_io_completion(req, nr_bytes); + + total_bytes = 0; + while (req->bio) { + struct bio *bio = req->bio; + unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); + + if (bio_bytes == bio->bi_iter.bi_size) + req->bio = bio->bi_next; + + req_bio_endio(req, bio, bio_bytes, error); + + total_bytes += bio_bytes; + nr_bytes -= bio_bytes; + + if (!nr_bytes) + break; + } + + /* + * completely done + */ + if (!req->bio) { + /* + * Reset counters so that the request stacking driver + * can find how many bytes remain in the request + * later. + */ + req->__data_len = 0; + return false; + } + + req->__data_len -= total_bytes; + + /* update sector only for requests with clear definition of sector */ + if (req->cmd_type == REQ_TYPE_FS) + req->__sector += total_bytes >> 9; + + /* mixed attributes always follow the first bio */ + if (req->cmd_flags & REQ_MIXED_MERGE) { + req->cmd_flags &= ~REQ_FAILFAST_MASK; + req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; + } + + /* + * If total number of sectors is less than the first segment + * size, something has gone terribly wrong. 
+ */ + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { + blk_dump_rq_flags(req, "request botched"); + req->__data_len = blk_rq_cur_bytes(req); + } + + /* recalculate the number of segments */ + blk_recalc_rq_segments(req); + + return true; +} +EXPORT_SYMBOL_GPL(blk_update_request); + +static bool blk_update_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, + unsigned int bidi_bytes) +{ + if (blk_update_request(rq, error, nr_bytes)) + return true; + + /* Bidi request must be completed as a whole */ + if (unlikely(blk_bidi_rq(rq)) && + blk_update_request(rq->next_rq, error, bidi_bytes)) + return true; + + if (blk_queue_add_random(rq->q)) + add_disk_randomness(rq->rq_disk); + + return false; +} + +/** + * blk_unprep_request - unprepare a request + * @req: the request + * + * This function makes a request ready for complete resubmission (or + * completion). It happens only after all error handling is complete, + * so represents the appropriate moment to deallocate any resources + * that were allocated to the request in the prep_rq_fn. The queue + * lock is held when calling this. + */ +void blk_unprep_request(struct request *req) +{ + struct request_queue *q = req->q; + + req->cmd_flags &= ~REQ_DONTPREP; + if (q->unprep_rq_fn) + q->unprep_rq_fn(q, req); +} +EXPORT_SYMBOL_GPL(blk_unprep_request); + +/* + * queue lock must be held + */ +void blk_finish_request(struct request *req, int error) +{ + if (req->cmd_flags & REQ_QUEUED) + blk_queue_end_tag(req->q, req); + + BUG_ON(blk_queued_rq(req)); + + if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) + laptop_io_completion(&req->q->backing_dev_info); + + blk_delete_timer(req); + + if (req->cmd_flags & REQ_DONTPREP) + blk_unprep_request(req); + + blk_account_io_done(req); + + if (req->end_io) + req->end_io(req, error); + else { + if (blk_bidi_rq(req)) + __blk_put_request(req->next_rq->q, req->next_rq); + + __blk_put_request(req->q, req); + } +} +EXPORT_SYMBOL(blk_finish_request); + +/** + * blk_end_bidi_request - Complete a bidi request + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq + * + * Description: + * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. + * Drivers that supports bidi can safely call this member for any + * type of request, bidi or uni. In the later case @bidi_bytes is + * just ignored. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +static bool blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) +{ + struct request_queue *q = rq->q; + unsigned long flags; + + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; + + spin_lock_irqsave(q->queue_lock, flags); + blk_finish_request(rq, error); + spin_unlock_irqrestore(q->queue_lock, flags); + + return false; +} + +/** + * __blk_end_bidi_request - Complete a bidi request with queue lock held + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq + * + * Description: + * Identical to blk_end_bidi_request() except that queue lock is + * assumed to be locked on entry and remains so on return. 
+ * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) +{ + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; + + blk_finish_request(rq, error); + + return false; +} + +/** + * blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Ends I/O on a number of bytes attached to @rq. + * If @rq has leftover, sets it up for the next range of segments. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +{ + return blk_end_bidi_request(rq, error, nr_bytes, 0); +} +EXPORT_SYMBOL(blk_end_request); + +/** + * blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. + */ +void blk_end_request_all(struct request *rq, int error) +{ + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL(blk_end_request_all); + +/** + * blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool blk_end_request_cur(struct request *rq, int error) +{ + return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL(blk_end_request_cur); + +/** + * blk_end_request_err - Finish a request till the next failure boundary. + * @rq: the request to finish till the next failure boundary for + * @error: must be negative errno + * + * Description: + * Complete @rq till the next failure boundary. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool blk_end_request_err(struct request *rq, int error) +{ + WARN_ON(error >= 0); + return blk_end_request(rq, error, blk_rq_err_bytes(rq)); +} +EXPORT_SYMBOL_GPL(blk_end_request_err); + +/** + * __blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Must be called with queue lock held unlike blk_end_request(). + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +{ + return __blk_end_bidi_request(rq, error, nr_bytes, 0); +} +EXPORT_SYMBOL(__blk_end_request); + +/** + * __blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. Must be called with queue lock held. 
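+ *
+ * Example (illustrative sketch; my_dev is a placeholder): completing the
+ * in-flight request from an interrupt handler that does not already hold
+ * the queue lock:
+ *
+ *	spin_lock_irqsave(q->queue_lock, flags);
+ *	__blk_end_request_all(my_dev->rq, error);
+ *	my_dev->rq = NULL;
+ *	spin_unlock_irqrestore(q->queue_lock, flags);
+ *
+ * Without the lock already taken, blk_end_request_all() does the same job.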
+ */ +void __blk_end_request_all(struct request *rq, int error) +{ + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL(__blk_end_request_all); + +/** + * __blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. Must + * be called with queue lock held. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_cur(struct request *rq, int error) +{ + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL(__blk_end_request_cur); + +/** + * __blk_end_request_err - Finish a request till the next failure boundary. + * @rq: the request to finish till the next failure boundary for + * @error: must be negative errno + * + * Description: + * Complete @rq till the next failure boundary. Must be called + * with queue lock held. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_err(struct request *rq, int error) +{ + WARN_ON(error >= 0); + return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); +} +EXPORT_SYMBOL_GPL(__blk_end_request_err); + +void blk_rq_bio_prep(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ + rq->cmd_flags |= bio->bi_rw & REQ_WRITE; + + if (bio_has_data(bio)) + rq->nr_phys_segments = bio_phys_segments(q, bio); + + rq->__data_len = bio->bi_iter.bi_size; + rq->bio = rq->biotail = bio; + + if (bio->bi_bdev) + rq->rq_disk = bio->bi_bdev->bd_disk; +} + +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +/** + * rq_flush_dcache_pages - Helper function to flush all pages in a request + * @rq: the request to be flushed + * + * Description: + * Flush all pages in @rq. + */ +void rq_flush_dcache_pages(struct request *rq) +{ + struct req_iterator iter; + struct bio_vec bvec; + + rq_for_each_segment(bvec, rq, iter) + flush_dcache_page(bvec.bv_page); +} +EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); +#endif + +/** + * blk_lld_busy - Check if underlying low-level drivers of a device are busy + * @q : the queue of the device being checked + * + * Description: + * Check if underlying low-level drivers of a device are busy. + * If the drivers want to export their busy state, they must set own + * exporting function using blk_queue_lld_busy() first. + * + * Basically, this function is used only by request stacking drivers + * to stop dispatching requests to underlying devices when underlying + * devices are busy. This behavior helps more I/O merging on the queue + * of the request stacking driver and prevents I/O throughput regression + * on burst I/O load. + * + * Return: + * 0 - Not busy (The request stacking driver should dispatch request) + * 1 - Busy (The request stacking driver should stop dispatching request) + */ +int blk_lld_busy(struct request_queue *q) +{ + if (q->lld_busy_fn) + return q->lld_busy_fn(q); + + return 0; +} +EXPORT_SYMBOL_GPL(blk_lld_busy); + +/** + * blk_rq_unprep_clone - Helper function to free all bios in a cloned request + * @rq: the clone request to be cleaned up + * + * Description: + * Free all bios in @rq for a cloned request. 
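+ *
+ * Example (illustrative sketch, dm-style request stacking): once a clone
+ * built with blk_rq_prep_clone() has completed, release its bios before
+ * completing the original request:
+ *
+ *	blk_rq_unprep_clone(clone);
+ *	blk_end_request_all(orig, error);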
+ */ +void blk_rq_unprep_clone(struct request *rq) +{ + struct bio *bio; + + while ((bio = rq->bio) != NULL) { + rq->bio = bio->bi_next; + + bio_put(bio); + } +} +EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); + +/* + * Copy attributes of the original request to the clone request. + * The actual data parts (e.g. ->cmd, ->sense) are not copied. + */ +static void __blk_rq_prep_clone(struct request *dst, struct request *src) +{ + dst->cpu = src->cpu; + dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; + dst->cmd_type = src->cmd_type; + dst->__sector = blk_rq_pos(src); + dst->__data_len = blk_rq_bytes(src); + dst->nr_phys_segments = src->nr_phys_segments; + dst->ioprio = src->ioprio; + dst->extra_len = src->extra_len; +} + +/** + * blk_rq_prep_clone - Helper function to setup clone request + * @rq: the request to be setup + * @rq_src: original request to be cloned + * @bs: bio_set that bios for clone are allocated from + * @gfp_mask: memory allocation mask for bio + * @bio_ctr: setup function to be called for each clone bio. + * Returns %0 for success, non %0 for failure. + * @data: private data to be passed to @bio_ctr + * + * Description: + * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. + * The actual data parts of @rq_src (e.g. ->cmd, ->sense) + * are not copied, and copying such parts is the caller's responsibility. + * Also, pages which the original bios are pointing to are not copied + * and the cloned bios just point same pages. + * So cloned bios must be completed before original bios, which means + * the caller must complete @rq before @rq_src. + */ +int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data) +{ + struct bio *bio, *bio_src; + + if (!bs) + bs = fs_bio_set; + + __rq_for_each_bio(bio_src, rq_src) { + bio = bio_clone_fast(bio_src, gfp_mask, bs); + if (!bio) + goto free_and_out; + + if (bio_ctr && bio_ctr(bio, bio_src, data)) + goto free_and_out; + + if (rq->bio) { + rq->biotail->bi_next = bio; + rq->biotail = bio; + } else + rq->bio = rq->biotail = bio; + } + + __blk_rq_prep_clone(rq, rq_src); + + return 0; + +free_and_out: + if (bio) + bio_put(bio); + blk_rq_unprep_clone(rq); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(blk_rq_prep_clone); + +int kblockd_schedule_work(struct work_struct *work) +{ + return queue_work(kblockd_workqueue, work); +} +EXPORT_SYMBOL(kblockd_schedule_work); + +int kblockd_schedule_delayed_work(struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work(kblockd_workqueue, dwork, delay); +} +EXPORT_SYMBOL(kblockd_schedule_delayed_work); + +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); +} +EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); + +/** + * blk_start_plug - initialize blk_plug and track it inside the task_struct + * @plug: The &struct blk_plug that needs to be initialized + * + * Description: + * Tracking blk_plug inside the task_struct will help with auto-flushing the + * pending I/O should the task end up blocking between blk_start_plug() and + * blk_finish_plug(). This is important from a performance perspective, but + * also ensures that we don't deadlock. 
For instance, if the task is blocking + * for a memory allocation, memory reclaim could end up wanting to free a + * page belonging to that request that is currently residing in our private + * plug. By flushing the pending I/O when the process goes to sleep, we avoid + * this kind of deadlock. + */ +void blk_start_plug(struct blk_plug *plug) +{ + struct task_struct *tsk = current; + + INIT_LIST_HEAD(&plug->list); + INIT_LIST_HEAD(&plug->mq_list); + INIT_LIST_HEAD(&plug->cb_list); + + /* + * If this is a nested plug, don't actually assign it. It will be + * flushed on its own. + */ + if (!tsk->plug) { + /* + * Store ordering should not be needed here, since a potential + * preempt will imply a full memory barrier + */ + tsk->plug = plug; + } +} +EXPORT_SYMBOL(blk_start_plug); + +static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct request *rqa = container_of(a, struct request, queuelist); + struct request *rqb = container_of(b, struct request, queuelist); + + return !(rqa->q < rqb->q || + (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb))); +} + +/* + * If 'from_schedule' is true, then postpone the dispatch of requests + * until a safe kblockd context. We due this to avoid accidental big + * additional stack usage in driver dispatch, in places where the originally + * plugger did not intend it. + */ +static void queue_unplugged(struct request_queue *q, unsigned int depth, + bool from_schedule) + __releases(q->queue_lock) +{ + trace_block_unplug(q, depth, !from_schedule); + + if (from_schedule) + blk_run_queue_async(q); + else + __blk_run_queue(q); + spin_unlock(q->queue_lock); +} + +static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +{ + LIST_HEAD(callbacks); + + while (!list_empty(&plug->cb_list)) { + list_splice_init(&plug->cb_list, &callbacks); + + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, + struct blk_plug_cb, + list); + list_del(&cb->list); + cb->callback(cb, from_schedule); + } + } +} + +struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, + int size) +{ + struct blk_plug *plug = current->plug; + struct blk_plug_cb *cb; + + if (!plug) + return NULL; + + list_for_each_entry(cb, &plug->cb_list, list) + if (cb->callback == unplug && cb->data == data) + return cb; + + /* Not currently on the callback list */ + BUG_ON(size < sizeof(*cb)); + cb = kzalloc(size, GFP_ATOMIC); + if (cb) { + cb->data = data; + cb->callback = unplug; + list_add(&cb->list, &plug->cb_list); + } + return cb; +} +EXPORT_SYMBOL(blk_check_plugged); + +void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) +{ + struct request_queue *q; + unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; + + flush_plug_callbacks(plug, from_schedule); + + if (!list_empty(&plug->mq_list)) + blk_mq_flush_plug_list(plug, from_schedule); + + if (list_empty(&plug->list)) + return; + + list_splice_init(&plug->list, &list); + + list_sort(NULL, &list, plug_rq_cmp); + + q = NULL; + depth = 0; + + /* + * Save and disable interrupts here, to avoid doing it for every + * queue lock we have to take. 
+ */ + local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); + BUG_ON(!rq->q); + if (rq->q != q) { + /* + * This drops the queue lock + */ + if (q) + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; + spin_lock(q->queue_lock); + } + + /* + * Short-circuit if @q is dead + */ + if (unlikely(blk_queue_dying(q))) { + __blk_end_request_all(rq, -ENODEV); + continue; + } + + /* + * rq is already accounted, so use raw insert + */ + if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) + __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); + else + __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); + + depth++; + } + + /* + * This drops the queue lock + */ + if (q) + queue_unplugged(q, depth, from_schedule); + + local_irq_restore(flags); +} + +void blk_finish_plug(struct blk_plug *plug) +{ + blk_flush_plug_list(plug, false); + + if (plug == current->plug) + current->plug = NULL; +} +EXPORT_SYMBOL(blk_finish_plug); + +#ifdef CONFIG_PM +/** + * blk_pm_runtime_init - Block layer runtime PM initialization routine + * @q: the queue of the device + * @dev: the device the queue belongs to + * + * Description: + * Initialize runtime-PM-related fields for @q and start auto suspend for + * @dev. Drivers that want to take advantage of request-based runtime PM + * should call this function after @dev has been initialized, and its + * request queue @q has been allocated, and runtime PM for it can not happen + * yet(either due to disabled/forbidden or its usage_count > 0). In most + * cases, driver should call this function before any I/O has taken place. + * + * This function takes care of setting up using auto suspend for the device, + * the autosuspend delay is set to -1 to make runtime suspend impossible + * until an updated value is either set by user or by driver. Drivers do + * not need to touch other autosuspend settings. + * + * The block layer runtime PM is request based, so only works for drivers + * that use request as their IO unit instead of those directly use bio's. + */ +void blk_pm_runtime_init(struct request_queue *q, struct device *dev) +{ + q->dev = dev; + q->rpm_status = RPM_ACTIVE; + pm_runtime_set_autosuspend_delay(q->dev, -1); + pm_runtime_use_autosuspend(q->dev); +} +EXPORT_SYMBOL(blk_pm_runtime_init); + +/** + * blk_pre_runtime_suspend - Pre runtime suspend check + * @q: the queue of the device + * + * Description: + * This function will check if runtime suspend is allowed for the device + * by examining if there are any requests pending in the queue. If there + * are requests pending, the device can not be runtime suspended; otherwise, + * the queue's status will be updated to SUSPENDING and the driver can + * proceed to suspend the device. + * + * For the not allowed case, we mark last busy for the device so that + * runtime PM core will try to autosuspend it some time later. + * + * This function should be called near the start of the device's + * runtime_suspend callback. 
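+ *
+ * Example (illustrative sketch; my_dev_to_queue and my_hw_suspend are
+ * placeholders) of a driver's runtime_suspend callback:
+ *
+ *	static int my_runtime_suspend(struct device *dev)
+ *	{
+ *		struct request_queue *q = my_dev_to_queue(dev);
+ *		int ret;
+ *
+ *		ret = blk_pre_runtime_suspend(q);
+ *		if (ret)
+ *			return ret;
+ *		ret = my_hw_suspend(dev);
+ *		blk_post_runtime_suspend(q, ret);
+ *		return ret;
+ *	}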
+ * + * Return: + * 0 - OK to runtime suspend the device + * -EBUSY - Device should not be runtime suspended + */ +int blk_pre_runtime_suspend(struct request_queue *q) +{ + int ret = 0; + + spin_lock_irq(q->queue_lock); + if (q->nr_pending) { + ret = -EBUSY; + pm_runtime_mark_last_busy(q->dev); + } else { + q->rpm_status = RPM_SUSPENDING; + } + spin_unlock_irq(q->queue_lock); + return ret; +} +EXPORT_SYMBOL(blk_pre_runtime_suspend); + +/** + * blk_post_runtime_suspend - Post runtime suspend processing + * @q: the queue of the device + * @err: return value of the device's runtime_suspend function + * + * Description: + * Update the queue's runtime status according to the return value of the + * device's runtime suspend function and mark last busy for the device so + * that PM core will try to auto suspend the device at a later time. + * + * This function should be called near the end of the device's + * runtime_suspend callback. + */ +void blk_post_runtime_suspend(struct request_queue *q, int err) +{ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_SUSPENDED; + } else { + q->rpm_status = RPM_ACTIVE; + pm_runtime_mark_last_busy(q->dev); + } + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_post_runtime_suspend); + +/** + * blk_pre_runtime_resume - Pre runtime resume processing + * @q: the queue of the device + * + * Description: + * Update the queue's runtime status to RESUMING in preparation for the + * runtime resume of the device. + * + * This function should be called near the start of the device's + * runtime_resume callback. + */ +void blk_pre_runtime_resume(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + q->rpm_status = RPM_RESUMING; + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_pre_runtime_resume); + +/** + * blk_post_runtime_resume - Post runtime resume processing + * @q: the queue of the device + * @err: return value of the device's runtime_resume function + * + * Description: + * Update the queue's runtime status according to the return value of the + * device's runtime_resume function. If it is successfully resumed, process + * the requests that are queued into the device's queue when it is resuming + * and then mark last busy and initiate autosuspend for it. + * + * This function should be called near the end of the device's + * runtime_resume callback. 
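+ *
+ * Example (illustrative sketch; my_dev_to_queue and my_hw_resume are
+ * placeholders) of the matching runtime_resume callback:
+ *
+ *	static int my_runtime_resume(struct device *dev)
+ *	{
+ *		struct request_queue *q = my_dev_to_queue(dev);
+ *		int ret;
+ *
+ *		blk_pre_runtime_resume(q);
+ *		ret = my_hw_resume(dev);
+ *		blk_post_runtime_resume(q, ret);
+ *		return ret;
+ *	}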
+ */ +void blk_post_runtime_resume(struct request_queue *q, int err) +{ + spin_lock_irq(q->queue_lock); + if (!err) { + q->rpm_status = RPM_ACTIVE; + __blk_run_queue(q); + pm_runtime_mark_last_busy(q->dev); + pm_request_autosuspend(q->dev); + } else { + q->rpm_status = RPM_SUSPENDED; + } + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL(blk_post_runtime_resume); +#endif + +int __init blk_dev_init(void) +{ + BUILD_BUG_ON(__REQ_NR_BITS > 8 * + sizeof(((struct request *)0)->cmd_flags)); + + /* used for unplugging and affects IO latency/throughput - HIGHPRI */ + kblockd_workqueue = alloc_workqueue("kblockd", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!kblockd_workqueue) + panic("Failed to create kblockd\n"); + + request_cachep = kmem_cache_create("blkdev_requests", + sizeof(struct request), 0, SLAB_PANIC, NULL); + + blk_requestq_cachep = kmem_cache_create("blkdev_queue", + sizeof(struct request_queue), 0, SLAB_PANIC, NULL); + + return 0; +} diff --git a/block/blk-exec.c b/block/blk-exec.c new file mode 100644 index 000000000..9924725fa --- /dev/null +++ b/block/blk-exec.c @@ -0,0 +1,143 @@ +/* + * Functions related to setting various queue properties from drivers + */ +#include +#include +#include +#include +#include +#include + +#include "blk.h" + +/* + * for max sense size + */ +#include + +/** + * blk_end_sync_rq - executes a completion event on a request + * @rq: request to complete + * @error: end I/O status of the request + */ +static void blk_end_sync_rq(struct request *rq, int error) +{ + struct completion *waiting = rq->end_io_data; + + rq->end_io_data = NULL; + + /* + * complete last, if this is a stack request the process (and thus + * the rq pointer) could be invalid right after this complete() + */ + complete(waiting); +} + +/** + * blk_execute_rq_nowait - insert a request into queue for execution + * @q: queue to insert the request in + * @bd_disk: matching gendisk + * @rq: request to insert + * @at_head: insert request at head or tail of queue + * @done: I/O completion handler + * + * Description: + * Insert a fully prepared request at the back of the I/O scheduler queue + * for execution. Don't wait for completion. + * + * Note: + * This function will invoke @done directly if the queue is dead. + */ +void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, + struct request *rq, int at_head, + rq_end_io_fn *done) +{ + int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; + bool is_pm_resume; + + WARN_ON(irqs_disabled()); + WARN_ON(rq->cmd_type == REQ_TYPE_FS); + + rq->rq_disk = bd_disk; + rq->end_io = done; + + /* + * don't check dying flag for MQ because the request won't + * be resued after dying flag is set + */ + if (q->mq_ops) { + blk_mq_insert_request(rq, at_head, true, false); + return; + } + + /* + * need to check this before __blk_run_queue(), because rq can + * be freed before that returns. 
+ */ + is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME; + + spin_lock_irq(q->queue_lock); + + if (unlikely(blk_queue_dying(q))) { + rq->cmd_flags |= REQ_QUIET; + rq->errors = -ENXIO; + __blk_end_request_all(rq, rq->errors); + spin_unlock_irq(q->queue_lock); + return; + } + + __elv_add_request(q, rq, where); + __blk_run_queue(q); + /* the queue is stopped so it won't be run */ + if (is_pm_resume) + __blk_run_queue_uncond(q); + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); + +/** + * blk_execute_rq - insert a request into queue for execution + * @q: queue to insert the request in + * @bd_disk: matching gendisk + * @rq: request to insert + * @at_head: insert request at head or tail of queue + * + * Description: + * Insert a fully prepared request at the back of the I/O scheduler queue + * for execution and wait for completion. + */ +int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, + struct request *rq, int at_head) +{ + DECLARE_COMPLETION_ONSTACK(wait); + char sense[SCSI_SENSE_BUFFERSIZE]; + int err = 0; + unsigned long hang_check; + + if (!rq->sense) { + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + } + + rq->end_io_data = &wait; + blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); + + /* Prevent hang_check timer from firing at us during very long I/O */ + hang_check = sysctl_hung_task_timeout_secs; + if (hang_check) + while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2))); + else + wait_for_completion_io(&wait); + + if (rq->errors) + err = -EIO; + + if (rq->sense == sense) { + rq->sense = NULL; + rq->sense_len = 0; + } + + return err; +} +EXPORT_SYMBOL(blk_execute_rq); diff --git a/block/blk-flush.c b/block/blk-flush.c new file mode 100644 index 000000000..20badd7b9 --- /dev/null +++ b/block/blk-flush.c @@ -0,0 +1,529 @@ +/* + * Functions to sequence FLUSH and FUA writes. + * + * Copyright (C) 2011 Max Planck Institute for Gravitational Physics + * Copyright (C) 2011 Tejun Heo + * + * This file is released under the GPLv2. + * + * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three + * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request + * properties and hardware capability. + * + * If a request doesn't have data, only REQ_FLUSH makes sense, which + * indicates a simple flush request. If there is data, REQ_FLUSH indicates + * that the device cache should be flushed before the data is executed, and + * REQ_FUA means that the data must be on non-volatile media on request + * completion. + * + * If the device doesn't have writeback cache, FLUSH and FUA don't make any + * difference. The requests are either completed immediately if there's no + * data or executed as normal requests otherwise. + * + * If the device has writeback cache and supports FUA, REQ_FLUSH is + * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. + * + * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is + * translated to PREFLUSH and REQ_FUA to POSTFLUSH. + * + * The actual execution of flush is double buffered. Whenever a request + * needs to execute PRE or POSTFLUSH, it queues at + * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a + * flush is issued and the pending_idx is toggled. When the flush + * completes, all the requests which were pending are proceeded to the next + * step. This allows arbitrary merging of different types of FLUSH/FUA + * requests. 
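+ *
+ * As a concrete illustration of the decomposition described above, a
+ * REQ_WRITE | REQ_FLUSH | REQ_FUA request carrying data is executed as
+ *
+ *	PREFLUSH + DATA			on a queue advertising FLUSH and FUA,
+ *	PREFLUSH + DATA + POSTFLUSH	on a queue advertising FLUSH only,
+ *	DATA				on a queue without a writeback cache,
+ *
+ * with REQ_FUA passed down together with the DATA step in the first case.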
+ * + * Currently, the following conditions are used to determine when to issue + * flush. + * + * C1. At any given time, only one flush shall be in progress. This makes + * double buffering sufficient. + * + * C2. Flush is deferred if any request is executing DATA of its sequence. + * This avoids issuing separate POSTFLUSHes for requests which shared + * PREFLUSH. + * + * C3. The second condition is ignored if there is a request which has + * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid + * starvation in the unlikely case where there are continuous stream of + * FUA (without FLUSH) requests. + * + * For devices which support FUA, it isn't clear whether C2 (and thus C3) + * is beneficial. + * + * Note that a sequenced FLUSH/FUA request with DATA is completed twice. + * Once while executing DATA and again after the whole sequence is + * complete. The first completion updates the contained bio but doesn't + * finish it so that the bio submitter is notified only after the whole + * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in + * req_bio_endio(). + * + * The above peculiarity requires that each FLUSH/FUA request has only one + * bio attached to it, which is guaranteed as they aren't allowed to be + * merged in the usual way. + */ + +#include +#include +#include +#include +#include +#include + +#include "blk.h" +#include "blk-mq.h" + +/* FLUSH/FUA sequences */ +enum { + REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ + REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ + REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */ + REQ_FSEQ_DONE = (1 << 3), + + REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | + REQ_FSEQ_POSTFLUSH, + + /* + * If flush has been pending longer than the following timeout, + * it's issued even if flush_data requests are still in flight. + */ + FLUSH_PENDING_TIMEOUT = 5 * HZ, +}; + +static bool blk_kick_flush(struct request_queue *q, + struct blk_flush_queue *fq); + +static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) +{ + unsigned int policy = 0; + + if (blk_rq_sectors(rq)) + policy |= REQ_FSEQ_DATA; + + if (fflags & REQ_FLUSH) { + if (rq->cmd_flags & REQ_FLUSH) + policy |= REQ_FSEQ_PREFLUSH; + if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) + policy |= REQ_FSEQ_POSTFLUSH; + } + return policy; +} + +static unsigned int blk_flush_cur_seq(struct request *rq) +{ + return 1 << ffz(rq->flush.seq); +} + +static void blk_flush_restore_request(struct request *rq) +{ + /* + * After flush data completion, @rq->bio is %NULL but we need to + * complete the bio again. @rq->biotail is guaranteed to equal the + * original @rq->bio. Restore it. 
+ */ + rq->bio = rq->biotail; + + /* make @rq a normal request */ + rq->cmd_flags &= ~REQ_FLUSH_SEQ; + rq->end_io = rq->flush.saved_end_io; +} + +static bool blk_flush_queue_rq(struct request *rq, bool add_front) +{ + if (rq->q->mq_ops) { + struct request_queue *q = rq->q; + + blk_mq_add_to_requeue_list(rq, add_front); + blk_mq_kick_requeue_list(q); + return false; + } else { + if (add_front) + list_add(&rq->queuelist, &rq->q->queue_head); + else + list_add_tail(&rq->queuelist, &rq->q->queue_head); + return true; + } +} + +/** + * blk_flush_complete_seq - complete flush sequence + * @rq: FLUSH/FUA request being sequenced + * @fq: flush queue + * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) + * @error: whether an error occurred + * + * @rq just completed @seq part of its flush sequence, record the + * completion and trigger the next step. + * + * CONTEXT: + * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) + * + * RETURNS: + * %true if requests were added to the dispatch queue, %false otherwise. + */ +static bool blk_flush_complete_seq(struct request *rq, + struct blk_flush_queue *fq, + unsigned int seq, int error) +{ + struct request_queue *q = rq->q; + struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; + bool queued = false, kicked; + + BUG_ON(rq->flush.seq & seq); + rq->flush.seq |= seq; + + if (likely(!error)) + seq = blk_flush_cur_seq(rq); + else + seq = REQ_FSEQ_DONE; + + switch (seq) { + case REQ_FSEQ_PREFLUSH: + case REQ_FSEQ_POSTFLUSH: + /* queue for flush */ + if (list_empty(pending)) + fq->flush_pending_since = jiffies; + list_move_tail(&rq->flush.list, pending); + break; + + case REQ_FSEQ_DATA: + list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); + queued = blk_flush_queue_rq(rq, true); + break; + + case REQ_FSEQ_DONE: + /* + * @rq was previously adjusted by blk_flush_issue() for + * flush sequencing and may already have gone through the + * flush data request completion path. Restore @rq for + * normal completion and end it. + */ + BUG_ON(!list_empty(&rq->queuelist)); + list_del_init(&rq->flush.list); + blk_flush_restore_request(rq); + if (q->mq_ops) + blk_mq_end_request(rq, error); + else + __blk_end_request_all(rq, error); + break; + + default: + BUG(); + } + + kicked = blk_kick_flush(q, fq); + return kicked | queued; +} + +static void flush_end_io(struct request *flush_rq, int error) +{ + struct request_queue *q = flush_rq->q; + struct list_head *running; + bool queued = false; + struct request *rq, *n; + unsigned long flags = 0; + struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); + + if (q->mq_ops) { + spin_lock_irqsave(&fq->mq_flush_lock, flags); + flush_rq->tag = -1; + } + + running = &fq->flush_queue[fq->flush_running_idx]; + BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); + + /* account completion of the flush request */ + fq->flush_running_idx ^= 1; + + if (!q->mq_ops) + elv_completed_request(q, flush_rq); + + /* and push the waiting requests to the next stage */ + list_for_each_entry_safe(rq, n, running, flush.list) { + unsigned int seq = blk_flush_cur_seq(rq); + + BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); + queued |= blk_flush_complete_seq(rq, fq, seq, error); + } + + /* + * Kick the queue to avoid stall for two cases: + * 1. Moving a request silently to empty queue_head may stall the + * queue. + * 2. When flush request is running in non-queueable queue, the + * queue is hold. Restart the queue after flush request is finished + * to avoid stall. 
+ * This function is called from request completion path and calling + * directly into request_fn may confuse the driver. Always use + * kblockd. + */ + if (queued || fq->flush_queue_delayed) { + WARN_ON(q->mq_ops); + blk_run_queue_async(q); + } + fq->flush_queue_delayed = 0; + if (q->mq_ops) + spin_unlock_irqrestore(&fq->mq_flush_lock, flags); +} + +/** + * blk_kick_flush - consider issuing flush request + * @q: request_queue being kicked + * @fq: flush queue + * + * Flush related states of @q have changed, consider issuing flush request. + * Please read the comment at the top of this file for more info. + * + * CONTEXT: + * spin_lock_irq(q->queue_lock or fq->mq_flush_lock) + * + * RETURNS: + * %true if flush was issued, %false otherwise. + */ +static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) +{ + struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; + struct request *first_rq = + list_first_entry(pending, struct request, flush.list); + struct request *flush_rq = fq->flush_rq; + + /* C1 described at the top of this file */ + if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) + return false; + + /* C2 and C3 */ + if (!list_empty(&fq->flush_data_in_flight) && + time_before(jiffies, + fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) + return false; + + /* + * Issue flush and toggle pending_idx. This makes pending_idx + * different from running_idx, which means flush is in flight. + */ + fq->flush_pending_idx ^= 1; + + blk_rq_init(q, flush_rq); + + /* + * Borrow tag from the first request since they can't + * be in flight at the same time. + */ + if (q->mq_ops) { + flush_rq->mq_ctx = first_rq->mq_ctx; + flush_rq->tag = first_rq->tag; + } + + flush_rq->cmd_type = REQ_TYPE_FS; + flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; + flush_rq->rq_disk = first_rq->rq_disk; + flush_rq->end_io = flush_end_io; + + return blk_flush_queue_rq(flush_rq, false); +} + +static void flush_data_end_io(struct request *rq, int error) +{ + struct request_queue *q = rq->q; + struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); + + /* + * After populating an empty queue, kick it to avoid stall. Read + * the comment in flush_end_io(). + */ + if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) + blk_run_queue_async(q); +} + +static void mq_flush_data_end_io(struct request *rq, int error) +{ + struct request_queue *q = rq->q; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx = rq->mq_ctx; + unsigned long flags; + struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); + + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + /* + * After populating an empty queue, kick it to avoid stall. Read + * the comment in flush_end_io(). + */ + spin_lock_irqsave(&fq->mq_flush_lock, flags); + if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error)) + blk_mq_run_hw_queue(hctx, true); + spin_unlock_irqrestore(&fq->mq_flush_lock, flags); +} + +/** + * blk_insert_flush - insert a new FLUSH/FUA request + * @rq: request to insert + * + * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. + * or __blk_mq_run_hw_queue() to dispatch request. + * @rq is being submitted. Analyze what needs to be done and put it on the + * right queue. 
+ * + * CONTEXT: + * spin_lock_irq(q->queue_lock) in !mq case + */ +void blk_insert_flush(struct request *rq) +{ + struct request_queue *q = rq->q; + unsigned int fflags = q->flush_flags; /* may change, cache */ + unsigned int policy = blk_flush_policy(fflags, rq); + struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); + + /* + * @policy now records what operations need to be done. Adjust + * REQ_FLUSH and FUA for the driver. + */ + rq->cmd_flags &= ~REQ_FLUSH; + if (!(fflags & REQ_FUA)) + rq->cmd_flags &= ~REQ_FUA; + + /* + * An empty flush handed down from a stacking driver may + * translate into nothing if the underlying device does not + * advertise a write-back cache. In this case, simply + * complete the request. + */ + if (!policy) { + if (q->mq_ops) + blk_mq_end_request(rq, 0); + else + __blk_end_bidi_request(rq, 0, 0, 0); + return; + } + + BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ + + /* + * If there's data but flush is not necessary, the request can be + * processed directly without going through flush machinery. Queue + * for normal execution. + */ + if ((policy & REQ_FSEQ_DATA) && + !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { + if (q->mq_ops) { + blk_mq_insert_request(rq, false, false, true); + } else + list_add_tail(&rq->queuelist, &q->queue_head); + return; + } + + /* + * @rq should go through flush machinery. Mark it part of flush + * sequence and submit for further processing. + */ + memset(&rq->flush, 0, sizeof(rq->flush)); + INIT_LIST_HEAD(&rq->flush.list); + rq->cmd_flags |= REQ_FLUSH_SEQ; + rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ + if (q->mq_ops) { + rq->end_io = mq_flush_data_end_io; + + spin_lock_irq(&fq->mq_flush_lock); + blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); + spin_unlock_irq(&fq->mq_flush_lock); + return; + } + rq->end_io = flush_data_end_io; + + blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); +} + +/** + * blkdev_issue_flush - queue a flush + * @bdev: blockdev to issue flush for + * @gfp_mask: memory allocation flags (for bio_alloc) + * @error_sector: error sector + * + * Description: + * Issue a flush for the block device in question. Caller can supply + * room for storing the error offset in case of a flush error, if they + * wish to. If WAIT flag is not passed then caller may check only what + * request was pushed in some internal queue for later handling. + */ +int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, + sector_t *error_sector) +{ + struct request_queue *q; + struct bio *bio; + int ret = 0; + + if (bdev->bd_disk == NULL) + return -ENXIO; + + q = bdev_get_queue(bdev); + if (!q) + return -ENXIO; + + /* + * some block devices may not have their queue correctly set up here + * (e.g. loop device without a backing file) and so issuing a flush + * here will panic. Ensure there is a request function before issuing + * the flush. + */ + if (!q->make_request_fn) + return -ENXIO; + + bio = bio_alloc(gfp_mask, 0); + bio->bi_bdev = bdev; + + ret = submit_bio_wait(WRITE_FLUSH, bio); + + /* + * The driver must store the error location in ->bi_sector, if + * it supports it. For non-stacked drivers, this should be + * copied from blk_rq_pos(rq). 
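+ *
+ * As an illustration (sketch; sb and bad_sector are placeholders), a
+ * filesystem forcing the device cache to stable storage might do:
+ *
+ *	sector_t bad_sector;
+ *	int err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, &bad_sector);
+ *
+ * and, on error, consult bad_sector if the driver filled it in.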
+ */ + if (error_sector) + *error_sector = bio->bi_iter.bi_sector; + + bio_put(bio); + return ret; +} +EXPORT_SYMBOL(blkdev_issue_flush); + +struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, + int node, int cmd_size) +{ + struct blk_flush_queue *fq; + int rq_sz = sizeof(struct request); + + fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); + if (!fq) + goto fail; + + if (q->mq_ops) { + spin_lock_init(&fq->mq_flush_lock); + rq_sz = round_up(rq_sz + cmd_size, cache_line_size()); + } + + fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); + if (!fq->flush_rq) + goto fail_rq; + + INIT_LIST_HEAD(&fq->flush_queue[0]); + INIT_LIST_HEAD(&fq->flush_queue[1]); + INIT_LIST_HEAD(&fq->flush_data_in_flight); + + return fq; + + fail_rq: + kfree(fq); + fail: + return NULL; +} + +void blk_free_flush_queue(struct blk_flush_queue *fq) +{ + /* bio based request queue hasn't flush queue */ + if (!fq) + return; + + kfree(fq->flush_rq); + kfree(fq); +} diff --git a/block/blk-integrity.c b/block/blk-integrity.c new file mode 100644 index 000000000..79ffb4855 --- /dev/null +++ b/block/blk-integrity.c @@ -0,0 +1,483 @@ +/* + * blk-integrity.c - Block layer data integrity extensions + * + * Copyright (C) 2007, 2008 Oracle Corporation + * Written by: Martin K. Petersen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "blk.h" + +static struct kmem_cache *integrity_cachep; + +static const char *bi_unsupported_name = "unsupported"; + +/** + * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements + * @q: request queue + * @bio: bio with integrity metadata attached + * + * Description: Returns the number of elements required in a + * scatterlist corresponding to the integrity metadata in a bio. + */ +int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) +{ + struct bio_vec iv, ivprv = { NULL }; + unsigned int segments = 0; + unsigned int seg_size = 0; + struct bvec_iter iter; + int prev = 0; + + bio_for_each_integrity_vec(iv, bio, iter) { + + if (prev) { + if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv)) + goto new_segment; + + if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv)) + goto new_segment; + + if (seg_size + iv.bv_len > queue_max_segment_size(q)) + goto new_segment; + + seg_size += iv.bv_len; + } else { +new_segment: + segments++; + seg_size = iv.bv_len; + } + + prev = 1; + ivprv = iv; + } + + return segments; +} +EXPORT_SYMBOL(blk_rq_count_integrity_sg); + +/** + * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist + * @q: request queue + * @bio: bio with integrity metadata attached + * @sglist: target scatterlist + * + * Description: Map the integrity vectors in request into a + * scatterlist. The scatterlist must be big enough to hold all + * elements. I.e. sized using blk_rq_count_integrity_sg(). 
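+ *
+ * Example (illustrative sketch; the scatterlist allocation is elided and
+ * sgl is a placeholder for its first entry):
+ *
+ *	nr = blk_rq_count_integrity_sg(q, rq->bio);
+ *	... allocate a scatterlist with nr entries ...
+ *	blk_rq_map_integrity_sg(q, rq->bio, sgl);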
+ */ +int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist) +{ + struct bio_vec iv, ivprv = { NULL }; + struct scatterlist *sg = NULL; + unsigned int segments = 0; + struct bvec_iter iter; + int prev = 0; + + bio_for_each_integrity_vec(iv, bio, iter) { + + if (prev) { + if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv)) + goto new_segment; + + if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv)) + goto new_segment; + + if (sg->length + iv.bv_len > queue_max_segment_size(q)) + goto new_segment; + + sg->length += iv.bv_len; + } else { +new_segment: + if (!sg) + sg = sglist; + else { + sg_unmark_end(sg); + sg = sg_next(sg); + } + + sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); + segments++; + } + + prev = 1; + ivprv = iv; + } + + if (sg) + sg_mark_end(sg); + + return segments; +} +EXPORT_SYMBOL(blk_rq_map_integrity_sg); + +/** + * blk_integrity_compare - Compare integrity profile of two disks + * @gd1: Disk to compare + * @gd2: Disk to compare + * + * Description: Meta-devices like DM and MD need to verify that all + * sub-devices use the same integrity format before advertising to + * upper layers that they can send/receive integrity metadata. This + * function can be used to check whether two gendisk devices have + * compatible integrity formats. + */ +int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) +{ + struct blk_integrity *b1 = gd1->integrity; + struct blk_integrity *b2 = gd2->integrity; + + if (!b1 && !b2) + return 0; + + if (!b1 || !b2) + return -1; + + if (b1->interval != b2->interval) { + pr_err("%s: %s/%s protection interval %u != %u\n", + __func__, gd1->disk_name, gd2->disk_name, + b1->interval, b2->interval); + return -1; + } + + if (b1->tuple_size != b2->tuple_size) { + printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->tuple_size, b2->tuple_size); + return -1; + } + + if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { + printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->tag_size, b2->tag_size); + return -1; + } + + if (strcmp(b1->name, b2->name)) { + printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->name, b2->name); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(blk_integrity_compare); + +bool blk_integrity_merge_rq(struct request_queue *q, struct request *req, + struct request *next) +{ + if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0) + return true; + + if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0) + return false; + + if (bio_integrity(req->bio)->bip_flags != + bio_integrity(next->bio)->bip_flags) + return false; + + if (req->nr_integrity_segments + next->nr_integrity_segments > + q->limits.max_integrity_segments) + return false; + + return true; +} +EXPORT_SYMBOL(blk_integrity_merge_rq); + +bool blk_integrity_merge_bio(struct request_queue *q, struct request *req, + struct bio *bio) +{ + int nr_integrity_segs; + struct bio *next = bio->bi_next; + + if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) + return true; + + if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL) + return false; + + if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags) + return false; + + bio->bi_next = NULL; + nr_integrity_segs = blk_rq_count_integrity_sg(q, bio); + bio->bi_next = next; + + if (req->nr_integrity_segments + nr_integrity_segs > + q->limits.max_integrity_segments) + return false; + + req->nr_integrity_segments 
+= nr_integrity_segs; + + return true; +} +EXPORT_SYMBOL(blk_integrity_merge_bio); + +struct integrity_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_integrity *, char *); + ssize_t (*store)(struct blk_integrity *, const char *, size_t); +}; + +static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct blk_integrity *bi = + container_of(kobj, struct blk_integrity, kobj); + struct integrity_sysfs_entry *entry = + container_of(attr, struct integrity_sysfs_entry, attr); + + return entry->show(bi, page); +} + +static ssize_t integrity_attr_store(struct kobject *kobj, + struct attribute *attr, const char *page, + size_t count) +{ + struct blk_integrity *bi = + container_of(kobj, struct blk_integrity, kobj); + struct integrity_sysfs_entry *entry = + container_of(attr, struct integrity_sysfs_entry, attr); + ssize_t ret = 0; + + if (entry->store) + ret = entry->store(bi, page, count); + + return ret; +} + +static ssize_t integrity_format_show(struct blk_integrity *bi, char *page) +{ + if (bi != NULL && bi->name != NULL) + return sprintf(page, "%s\n", bi->name); + else + return sprintf(page, "none\n"); +} + +static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page) +{ + if (bi != NULL) + return sprintf(page, "%u\n", bi->tag_size); + else + return sprintf(page, "0\n"); +} + +static ssize_t integrity_verify_store(struct blk_integrity *bi, + const char *page, size_t count) +{ + char *p = (char *) page; + unsigned long val = simple_strtoul(p, &p, 10); + + if (val) + bi->flags |= BLK_INTEGRITY_VERIFY; + else + bi->flags &= ~BLK_INTEGRITY_VERIFY; + + return count; +} + +static ssize_t integrity_verify_show(struct blk_integrity *bi, char *page) +{ + return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0); +} + +static ssize_t integrity_generate_store(struct blk_integrity *bi, + const char *page, size_t count) +{ + char *p = (char *) page; + unsigned long val = simple_strtoul(p, &p, 10); + + if (val) + bi->flags |= BLK_INTEGRITY_GENERATE; + else + bi->flags &= ~BLK_INTEGRITY_GENERATE; + + return count; +} + +static ssize_t integrity_generate_show(struct blk_integrity *bi, char *page) +{ + return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0); +} + +static ssize_t integrity_device_show(struct blk_integrity *bi, char *page) +{ + return sprintf(page, "%u\n", + (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) != 0); +} + +static struct integrity_sysfs_entry integrity_format_entry = { + .attr = { .name = "format", .mode = S_IRUGO }, + .show = integrity_format_show, +}; + +static struct integrity_sysfs_entry integrity_tag_size_entry = { + .attr = { .name = "tag_size", .mode = S_IRUGO }, + .show = integrity_tag_size_show, +}; + +static struct integrity_sysfs_entry integrity_verify_entry = { + .attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR }, + .show = integrity_verify_show, + .store = integrity_verify_store, +}; + +static struct integrity_sysfs_entry integrity_generate_entry = { + .attr = { .name = "write_generate", .mode = S_IRUGO | S_IWUSR }, + .show = integrity_generate_show, + .store = integrity_generate_store, +}; + +static struct integrity_sysfs_entry integrity_device_entry = { + .attr = { .name = "device_is_integrity_capable", .mode = S_IRUGO }, + .show = integrity_device_show, +}; + +static struct attribute *integrity_attrs[] = { + &integrity_format_entry.attr, + &integrity_tag_size_entry.attr, + &integrity_verify_entry.attr, + &integrity_generate_entry.attr, + 
&integrity_device_entry.attr, + NULL, +}; + +static const struct sysfs_ops integrity_ops = { + .show = &integrity_attr_show, + .store = &integrity_attr_store, +}; + +static int __init blk_dev_integrity_init(void) +{ + integrity_cachep = kmem_cache_create("blkdev_integrity", + sizeof(struct blk_integrity), + 0, SLAB_PANIC, NULL); + return 0; +} +subsys_initcall(blk_dev_integrity_init); + +static void blk_integrity_release(struct kobject *kobj) +{ + struct blk_integrity *bi = + container_of(kobj, struct blk_integrity, kobj); + + kmem_cache_free(integrity_cachep, bi); +} + +static struct kobj_type integrity_ktype = { + .default_attrs = integrity_attrs, + .sysfs_ops = &integrity_ops, + .release = blk_integrity_release, +}; + +bool blk_integrity_is_initialized(struct gendisk *disk) +{ + struct blk_integrity *bi = blk_get_integrity(disk); + + return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0); +} +EXPORT_SYMBOL(blk_integrity_is_initialized); + +/** + * blk_integrity_register - Register a gendisk as being integrity-capable + * @disk: struct gendisk pointer to make integrity-aware + * @template: optional integrity profile to register + * + * Description: When a device needs to advertise itself as being able + * to send/receive integrity metadata it must use this function to + * register the capability with the block layer. The template is a + * blk_integrity struct with values appropriate for the underlying + * hardware. If template is NULL the new profile is allocated but + * not filled out. See Documentation/block/data-integrity.txt. + */ +int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) +{ + struct blk_integrity *bi; + + BUG_ON(disk == NULL); + + if (disk->integrity == NULL) { + bi = kmem_cache_alloc(integrity_cachep, + GFP_KERNEL | __GFP_ZERO); + if (!bi) + return -1; + + if (kobject_init_and_add(&bi->kobj, &integrity_ktype, + &disk_to_dev(disk)->kobj, + "%s", "integrity")) { + kmem_cache_free(integrity_cachep, bi); + return -1; + } + + kobject_uevent(&bi->kobj, KOBJ_ADD); + + bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE; + bi->interval = queue_logical_block_size(disk->queue); + disk->integrity = bi; + } else + bi = disk->integrity; + + /* Use the provided profile as template */ + if (template != NULL) { + bi->name = template->name; + bi->generate_fn = template->generate_fn; + bi->verify_fn = template->verify_fn; + bi->tuple_size = template->tuple_size; + bi->tag_size = template->tag_size; + bi->flags |= template->flags; + } else + bi->name = bi_unsupported_name; + + disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES; + + return 0; +} +EXPORT_SYMBOL(blk_integrity_register); + +/** + * blk_integrity_unregister - Remove block integrity profile + * @disk: disk whose integrity profile to deallocate + * + * Description: This function frees all memory used by the block + * integrity profile. To be called at device teardown. 
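+ *
+ *    The usual pairing (illustrative sketch; @disk and @tmpl stand for
+ *    the caller's gendisk and profile template) is
+ *
+ *        blk_integrity_register(disk, &tmpl);
+ *
+ *    at probe time and
+ *
+ *        blk_integrity_unregister(disk);
+ *
+ *    when the disk is torn down.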
+ */ +void blk_integrity_unregister(struct gendisk *disk) +{ + struct blk_integrity *bi; + + if (!disk || !disk->integrity) + return; + + disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES; + + bi = disk->integrity; + + kobject_uevent(&bi->kobj, KOBJ_REMOVE); + kobject_del(&bi->kobj); + kobject_put(&bi->kobj); + disk->integrity = NULL; +} +EXPORT_SYMBOL(blk_integrity_unregister); diff --git a/block/blk-ioc.c b/block/blk-ioc.c new file mode 100644 index 000000000..1a27f45ec --- /dev/null +++ b/block/blk-ioc.c @@ -0,0 +1,407 @@ +/* + * Functions related to io context handling + */ +#include +#include +#include +#include +#include +#include + +#include "blk.h" + +/* + * For io context allocations + */ +static struct kmem_cache *iocontext_cachep; + +/** + * get_io_context - increment reference count to io_context + * @ioc: io_context to get + * + * Increment reference count to @ioc. + */ +void get_io_context(struct io_context *ioc) +{ + BUG_ON(atomic_long_read(&ioc->refcount) <= 0); + atomic_long_inc(&ioc->refcount); +} +EXPORT_SYMBOL(get_io_context); + +static void icq_free_icq_rcu(struct rcu_head *head) +{ + struct io_cq *icq = container_of(head, struct io_cq, __rcu_head); + + kmem_cache_free(icq->__rcu_icq_cache, icq); +} + +/* Exit an icq. Called with both ioc and q locked. */ +static void ioc_exit_icq(struct io_cq *icq) +{ + struct elevator_type *et = icq->q->elevator->type; + + if (icq->flags & ICQ_EXITED) + return; + + if (et->ops.elevator_exit_icq_fn) + et->ops.elevator_exit_icq_fn(icq); + + icq->flags |= ICQ_EXITED; +} + +/* Release an icq. Called with both ioc and q locked. */ +static void ioc_destroy_icq(struct io_cq *icq) +{ + struct io_context *ioc = icq->ioc; + struct request_queue *q = icq->q; + struct elevator_type *et = q->elevator->type; + + lockdep_assert_held(&ioc->lock); + lockdep_assert_held(q->queue_lock); + + radix_tree_delete(&ioc->icq_tree, icq->q->id); + hlist_del_init(&icq->ioc_node); + list_del_init(&icq->q_node); + + /* + * Both setting lookup hint to and clearing it from @icq are done + * under queue_lock. If it's not pointing to @icq now, it never + * will. Hint assignment itself can race safely. + */ + if (rcu_access_pointer(ioc->icq_hint) == icq) + rcu_assign_pointer(ioc->icq_hint, NULL); + + ioc_exit_icq(icq); + + /* + * @icq->q might have gone away by the time RCU callback runs + * making it impossible to determine icq_cache. Record it in @icq. + */ + icq->__rcu_icq_cache = et->icq_cache; + call_rcu(&icq->__rcu_head, icq_free_icq_rcu); +} + +/* + * Slow path for ioc release in put_io_context(). Performs double-lock + * dancing to unlink all icq's and then frees ioc. + */ +static void ioc_release_fn(struct work_struct *work) +{ + struct io_context *ioc = container_of(work, struct io_context, + release_work); + unsigned long flags; + + /* + * Exiting icq may call into put_io_context() through elevator + * which will trigger lockdep warning. The ioc's are guaranteed to + * be different, use a different locking subclass here. Use + * irqsave variant as there's no spin_lock_irq_nested(). 
+ */ + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + + while (!hlist_empty(&ioc->icq_list)) { + struct io_cq *icq = hlist_entry(ioc->icq_list.first, + struct io_cq, ioc_node); + struct request_queue *q = icq->q; + + if (spin_trylock(q->queue_lock)) { + ioc_destroy_icq(icq); + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); + cpu_relax(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } + + spin_unlock_irqrestore(&ioc->lock, flags); + + kmem_cache_free(iocontext_cachep, ioc); +} + +/** + * put_io_context - put a reference of io_context + * @ioc: io_context to put + * + * Decrement reference count of @ioc and release it if the count reaches + * zero. + */ +void put_io_context(struct io_context *ioc) +{ + unsigned long flags; + bool free_ioc = false; + + if (ioc == NULL) + return; + + BUG_ON(atomic_long_read(&ioc->refcount) <= 0); + + /* + * Releasing ioc requires reverse order double locking and we may + * already be holding a queue_lock. Do it asynchronously from wq. + */ + if (atomic_long_dec_and_test(&ioc->refcount)) { + spin_lock_irqsave(&ioc->lock, flags); + if (!hlist_empty(&ioc->icq_list)) + queue_work(system_power_efficient_wq, + &ioc->release_work); + else + free_ioc = true; + spin_unlock_irqrestore(&ioc->lock, flags); + } + + if (free_ioc) + kmem_cache_free(iocontext_cachep, ioc); +} +EXPORT_SYMBOL(put_io_context); + +/** + * put_io_context_active - put active reference on ioc + * @ioc: ioc of interest + * + * Undo get_io_context_active(). If active reference reaches zero after + * put, @ioc can never issue further IOs and ioscheds are notified. + */ +void put_io_context_active(struct io_context *ioc) +{ + unsigned long flags; + struct io_cq *icq; + + if (!atomic_dec_and_test(&ioc->active_ref)) { + put_io_context(ioc); + return; + } + + /* + * Need ioc lock to walk icq_list and q lock to exit icq. Perform + * reverse double locking. Read comment in ioc_release_fn() for + * explanation on the nested locking annotation. + */ +retry: + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) { + if (icq->flags & ICQ_EXITED) + continue; + if (spin_trylock(icq->q->queue_lock)) { + ioc_exit_icq(icq); + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); + cpu_relax(); + goto retry; + } + } + spin_unlock_irqrestore(&ioc->lock, flags); + + put_io_context(ioc); +} + +/* Called by the exiting task */ +void exit_io_context(struct task_struct *task) +{ + struct io_context *ioc; + + task_lock(task); + ioc = task->io_context; + task->io_context = NULL; + task_unlock(task); + + atomic_dec(&ioc->nr_tasks); + put_io_context_active(ioc); +} + +/** + * ioc_clear_queue - break any ioc association with the specified queue + * @q: request_queue being cleared + * + * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked. 
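+ *
+ * A caller that does not already hold the lock wraps it as follows
+ * (illustrative sketch):
+ *
+ *     spin_lock_irq(q->queue_lock);
+ *     ioc_clear_queue(q);
+ *     spin_unlock_irq(q->queue_lock);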
+ */ +void ioc_clear_queue(struct request_queue *q) +{ + lockdep_assert_held(q->queue_lock); + + while (!list_empty(&q->icq_list)) { + struct io_cq *icq = list_entry(q->icq_list.next, + struct io_cq, q_node); + struct io_context *ioc = icq->ioc; + + spin_lock(&ioc->lock); + ioc_destroy_icq(icq); + spin_unlock(&ioc->lock); + } +} + +int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) +{ + struct io_context *ioc; + int ret; + + ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO, + node); + if (unlikely(!ioc)) + return -ENOMEM; + + /* initialize */ + atomic_long_set(&ioc->refcount, 1); + atomic_set(&ioc->nr_tasks, 1); + atomic_set(&ioc->active_ref, 1); + spin_lock_init(&ioc->lock); + INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH); + INIT_HLIST_HEAD(&ioc->icq_list); + INIT_WORK(&ioc->release_work, ioc_release_fn); + + /* + * Try to install. ioc shouldn't be installed if someone else + * already did or @task, which isn't %current, is exiting. Note + * that we need to allow ioc creation on exiting %current as exit + * path may issue IOs from e.g. exit_files(). The exit path is + * responsible for not issuing IO after exit_io_context(). + */ + task_lock(task); + if (!task->io_context && + (task == current || !(task->flags & PF_EXITING))) + task->io_context = ioc; + else + kmem_cache_free(iocontext_cachep, ioc); + + ret = task->io_context ? 0 : -EBUSY; + + task_unlock(task); + + return ret; +} + +/** + * get_task_io_context - get io_context of a task + * @task: task of interest + * @gfp_flags: allocation flags, used if allocation is necessary + * @node: allocation node, used if allocation is necessary + * + * Return io_context of @task. If it doesn't exist, it is created with + * @gfp_flags and @node. The returned io_context has its reference count + * incremented. + * + * This function always goes through task_lock() and it's better to use + * %current->io_context + get_io_context() for %current. + */ +struct io_context *get_task_io_context(struct task_struct *task, + gfp_t gfp_flags, int node) +{ + struct io_context *ioc; + + might_sleep_if(gfp_flags & __GFP_WAIT); + + do { + task_lock(task); + ioc = task->io_context; + if (likely(ioc)) { + get_io_context(ioc); + task_unlock(task); + return ioc; + } + task_unlock(task); + } while (!create_task_io_context(task, gfp_flags, node)); + + return NULL; +} +EXPORT_SYMBOL(get_task_io_context); + +/** + * ioc_lookup_icq - lookup io_cq from ioc + * @ioc: the associated io_context + * @q: the associated request_queue + * + * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called + * with @q->queue_lock held. + */ +struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q) +{ + struct io_cq *icq; + + lockdep_assert_held(q->queue_lock); + + /* + * icq's are indexed from @ioc using radix tree and hint pointer, + * both of which are protected with RCU. All removals are done + * holding both q and ioc locks, and we're holding q lock - if we + * find a icq which points to us, it's guaranteed to be valid. 
+ */ + rcu_read_lock(); + icq = rcu_dereference(ioc->icq_hint); + if (icq && icq->q == q) + goto out; + + icq = radix_tree_lookup(&ioc->icq_tree, q->id); + if (icq && icq->q == q) + rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */ + else + icq = NULL; +out: + rcu_read_unlock(); + return icq; +} +EXPORT_SYMBOL(ioc_lookup_icq); + +/** + * ioc_create_icq - create and link io_cq + * @ioc: io_context of interest + * @q: request_queue of interest + * @gfp_mask: allocation mask + * + * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they + * will be created using @gfp_mask. + * + * The caller is responsible for ensuring @ioc won't go away and @q is + * alive and will stay alive until this function returns. + */ +struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q, + gfp_t gfp_mask) +{ + struct elevator_type *et = q->elevator->type; + struct io_cq *icq; + + /* allocate stuff */ + icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO, + q->node); + if (!icq) + return NULL; + + if (radix_tree_maybe_preload(gfp_mask) < 0) { + kmem_cache_free(et->icq_cache, icq); + return NULL; + } + + icq->ioc = ioc; + icq->q = q; + INIT_LIST_HEAD(&icq->q_node); + INIT_HLIST_NODE(&icq->ioc_node); + + /* lock both q and ioc and try to link @icq */ + spin_lock_irq(q->queue_lock); + spin_lock(&ioc->lock); + + if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) { + hlist_add_head(&icq->ioc_node, &ioc->icq_list); + list_add(&icq->q_node, &q->icq_list); + if (et->ops.elevator_init_icq_fn) + et->ops.elevator_init_icq_fn(icq); + } else { + kmem_cache_free(et->icq_cache, icq); + icq = ioc_lookup_icq(ioc, q); + if (!icq) + printk(KERN_ERR "cfq: icq link failed!\n"); + } + + spin_unlock(&ioc->lock); + spin_unlock_irq(q->queue_lock); + radix_tree_preload_end(); + return icq; +} + +static int __init blk_ioc_init(void) +{ + iocontext_cachep = kmem_cache_create("blkdev_ioc", + sizeof(struct io_context), 0, SLAB_PANIC, NULL); + return 0; +} +subsys_initcall(blk_ioc_init); diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c new file mode 100644 index 000000000..0736729d6 --- /dev/null +++ b/block/blk-iopoll.c @@ -0,0 +1,224 @@ +/* + * Functions related to interrupt-poll handling in the block layer. This + * is similar to NAPI for network devices. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "blk.h" + +static unsigned int blk_iopoll_budget __read_mostly = 256; + +static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); + +/** + * blk_iopoll_sched - Schedule a run of the iopoll handler + * @iop: The parent iopoll structure + * + * Description: + * Add this blk_iopoll structure to the pending poll list and trigger the + * raise of the blk iopoll softirq. The driver must already have gotten a + * successful return from blk_iopoll_sched_prep() before calling this. + **/ +void blk_iopoll_sched(struct blk_iopoll *iop) +{ + unsigned long flags; + + local_irq_save(flags); + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_restore(flags); +} +EXPORT_SYMBOL(blk_iopoll_sched); + +/** + * __blk_iopoll_complete - Mark this @iop as un-polled again + * @iop: The parent iopoll structure + * + * Description: + * See blk_iopoll_complete(). This function must be called with interrupts + * disabled. 
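+ *
+ * Illustrative use from a driver's poll callback (hypothetical names,
+ * sketch only):
+ *
+ *     if (work_done < budget)
+ *             blk_iopoll_complete(iop);
+ *
+ * __blk_iopoll_complete() is the variant for call sites that already
+ * run with interrupts off.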
+ **/ +void __blk_iopoll_complete(struct blk_iopoll *iop) +{ + list_del(&iop->list); + smp_mb__before_atomic(); + clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(__blk_iopoll_complete); + +/** + * blk_iopoll_complete - Mark this @iop as un-polled again + * @iop: The parent iopoll structure + * + * Description: + * If a driver consumes less than the assigned budget in its run of the + * iopoll handler, it'll end the polled mode by calling this function. The + * iopoll handler will not be invoked again before blk_iopoll_sched_prep() + * is called. + **/ +void blk_iopoll_complete(struct blk_iopoll *iop) +{ + unsigned long flags; + + local_irq_save(flags); + __blk_iopoll_complete(iop); + local_irq_restore(flags); +} +EXPORT_SYMBOL(blk_iopoll_complete); + +static void blk_iopoll_softirq(struct softirq_action *h) +{ + struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); + int rearm = 0, budget = blk_iopoll_budget; + unsigned long start_time = jiffies; + + local_irq_disable(); + + while (!list_empty(list)) { + struct blk_iopoll *iop; + int work, weight; + + /* + * If softirq window is exhausted then punt. + */ + if (budget <= 0 || time_after(jiffies, start_time)) { + rearm = 1; + break; + } + + local_irq_enable(); + + /* Even though interrupts have been re-enabled, this + * access is safe because interrupts can only add new + * entries to the tail of this list, and only ->poll() + * calls can remove this head entry from the list. + */ + iop = list_entry(list->next, struct blk_iopoll, list); + + weight = iop->weight; + work = 0; + if (test_bit(IOPOLL_F_SCHED, &iop->state)) + work = iop->poll(iop, weight); + + budget -= work; + + local_irq_disable(); + + /* + * Drivers must not modify the iopoll state, if they + * consume their assigned weight (or more, some drivers can't + * easily just stop processing, they have to complete an + * entire mask of commands).In such cases this code + * still "owns" the iopoll instance and therefore can + * move the instance around on the list at-will. + */ + if (work >= weight) { + if (blk_iopoll_disable_pending(iop)) + __blk_iopoll_complete(iop); + else + list_move_tail(&iop->list, list); + } + } + + if (rearm) + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + + local_irq_enable(); +} + +/** + * blk_iopoll_disable - Disable iopoll on this @iop + * @iop: The parent iopoll structure + * + * Description: + * Disable io polling and wait for any pending callbacks to have completed. + **/ +void blk_iopoll_disable(struct blk_iopoll *iop) +{ + set_bit(IOPOLL_F_DISABLE, &iop->state); + while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state)) + msleep(1); + clear_bit(IOPOLL_F_DISABLE, &iop->state); +} +EXPORT_SYMBOL(blk_iopoll_disable); + +/** + * blk_iopoll_enable - Enable iopoll on this @iop + * @iop: The parent iopoll structure + * + * Description: + * Enable iopoll on this @iop. Note that the handler run will not be + * scheduled, it will only mark it as active. + **/ +void blk_iopoll_enable(struct blk_iopoll *iop) +{ + BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state)); + smp_mb__before_atomic(); + clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(blk_iopoll_enable); + +/** + * blk_iopoll_init - Initialize this @iop + * @iop: The parent iopoll structure + * @weight: The default weight (or command completion budget) + * @poll_fn: The handler to invoke + * + * Description: + * Initialize this blk_iopoll structure. Before being actively used, the + * driver must call blk_iopoll_enable(). 
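+ *
+ * Typical bring-up in a driver (illustrative sketch; mydev and
+ * mydev_poll are hypothetical):
+ *
+ *     blk_iopoll_init(&mydev->iopoll, 64, mydev_poll);
+ *     blk_iopoll_enable(&mydev->iopoll);
+ *
+ * after which the interrupt handler can defer completion work via
+ * blk_iopoll_sched_prep() followed by blk_iopoll_sched().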
+ **/ +void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn) +{ + memset(iop, 0, sizeof(*iop)); + INIT_LIST_HEAD(&iop->list); + iop->weight = weight; + iop->poll = poll_fn; + set_bit(IOPOLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(blk_iopoll_init); + +static int blk_iopoll_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + int cpu = (unsigned long) hcpu; + + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_enable(); + } + + return NOTIFY_OK; +} + +static struct notifier_block blk_iopoll_cpu_notifier = { + .notifier_call = blk_iopoll_cpu_notify, +}; + +static __init int blk_iopoll_setup(void) +{ + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); + + open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq); + register_hotcpu_notifier(&blk_iopoll_cpu_notifier); + return 0; +} +subsys_initcall(blk_iopoll_setup); diff --git a/block/blk-lib.c b/block/blk-lib.c new file mode 100644 index 000000000..7688ee3f5 --- /dev/null +++ b/block/blk-lib.c @@ -0,0 +1,317 @@ +/* + * Functions related to generic helpers functions + */ +#include +#include +#include +#include +#include + +#include "blk.h" + +struct bio_batch { + atomic_t done; + unsigned long flags; + struct completion *wait; +}; + +static void bio_batch_end_io(struct bio *bio, int err) +{ + struct bio_batch *bb = bio->bi_private; + + if (err && (err != -EOPNOTSUPP)) + clear_bit(BIO_UPTODATE, &bb->flags); + if (atomic_dec_and_test(&bb->done)) + complete(bb->wait); + bio_put(bio); +} + +/** + * blkdev_issue_discard - queue a discard + * @bdev: blockdev to issue discard for + * @sector: start sector + * @nr_sects: number of sectors to discard + * @gfp_mask: memory allocation flags (for bio_alloc) + * @flags: BLKDEV_IFL_* flags to control behaviour + * + * Description: + * Issue a discard request for the sectors in question. + */ +int blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) +{ + DECLARE_COMPLETION_ONSTACK(wait); + struct request_queue *q = bdev_get_queue(bdev); + int type = REQ_WRITE | REQ_DISCARD; + unsigned int max_discard_sectors, granularity; + int alignment; + struct bio_batch bb; + struct bio *bio; + int ret = 0; + struct blk_plug plug; + + if (!q) + return -ENXIO; + + if (!blk_queue_discard(q)) + return -EOPNOTSUPP; + + /* Zero-sector (unknown) and one-sector granularities are the same. */ + granularity = max(q->limits.discard_granularity >> 9, 1U); + alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; + + /* + * Ensure that max_discard_sectors is of the proper + * granularity, so that requests stay aligned after a split. + */ + max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); + max_discard_sectors -= max_discard_sectors % granularity; + if (unlikely(!max_discard_sectors)) { + /* Avoid infinite loop below. Being cautious never hurts. 
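+ * For example, a device reporting max_discard_sectors = 512 while
+ * advertising a discard granularity of 1024 sectors rounds down to 0
+ * above, and the split loop below would then never make progress.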
*/ + return -EOPNOTSUPP; + } + + if (flags & BLKDEV_DISCARD_SECURE) { + if (!blk_queue_secdiscard(q)) + return -EOPNOTSUPP; + type |= REQ_SECURE; + } + + atomic_set(&bb.done, 1); + bb.flags = 1 << BIO_UPTODATE; + bb.wait = &wait; + + blk_start_plug(&plug); + while (nr_sects) { + unsigned int req_sects; + sector_t end_sect, tmp; + + bio = bio_alloc(gfp_mask, 1); + if (!bio) { + ret = -ENOMEM; + break; + } + + req_sects = min_t(sector_t, nr_sects, max_discard_sectors); + + /* + * If splitting a request, and the next starting sector would be + * misaligned, stop the discard at the previous aligned sector. + */ + end_sect = sector + req_sects; + tmp = end_sect; + if (req_sects < nr_sects && + sector_div(tmp, granularity) != alignment) { + end_sect = end_sect - alignment; + sector_div(end_sect, granularity); + end_sect = end_sect * granularity + alignment; + req_sects = end_sect - sector; + } + + bio->bi_iter.bi_sector = sector; + bio->bi_end_io = bio_batch_end_io; + bio->bi_bdev = bdev; + bio->bi_private = &bb; + + bio->bi_iter.bi_size = req_sects << 9; + nr_sects -= req_sects; + sector = end_sect; + + atomic_inc(&bb.done); + submit_bio(type, bio); + + /* + * We can loop for a long time in here, if someone does + * full device discards (like mkfs). Be nice and allow + * us to schedule out to avoid softlocking if preempt + * is disabled. + */ + cond_resched(); + } + blk_finish_plug(&plug); + + /* Wait for bios in-flight */ + if (!atomic_dec_and_test(&bb.done)) + wait_for_completion_io(&wait); + + if (!test_bit(BIO_UPTODATE, &bb.flags)) + ret = -EIO; + + return ret; +} +EXPORT_SYMBOL(blkdev_issue_discard); + +/** + * blkdev_issue_write_same - queue a write same operation + * @bdev: target blockdev + * @sector: start sector + * @nr_sects: number of sectors to write + * @gfp_mask: memory allocation flags (for bio_alloc) + * @page: page containing data to write + * + * Description: + * Issue a write same request for the sectors in question. 
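+ *
+ *    For example, blkdev_issue_zeroout() below clears a whole range with
+ *    a single page of zeroes:
+ *
+ *        blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+ *                                ZERO_PAGE(0));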
+ */ +int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, + struct page *page) +{ + DECLARE_COMPLETION_ONSTACK(wait); + struct request_queue *q = bdev_get_queue(bdev); + unsigned int max_write_same_sectors; + struct bio_batch bb; + struct bio *bio; + int ret = 0; + + if (!q) + return -ENXIO; + + max_write_same_sectors = q->limits.max_write_same_sectors; + + if (max_write_same_sectors == 0) + return -EOPNOTSUPP; + + atomic_set(&bb.done, 1); + bb.flags = 1 << BIO_UPTODATE; + bb.wait = &wait; + + while (nr_sects) { + bio = bio_alloc(gfp_mask, 1); + if (!bio) { + ret = -ENOMEM; + break; + } + + bio->bi_iter.bi_sector = sector; + bio->bi_end_io = bio_batch_end_io; + bio->bi_bdev = bdev; + bio->bi_private = &bb; + bio->bi_vcnt = 1; + bio->bi_io_vec->bv_page = page; + bio->bi_io_vec->bv_offset = 0; + bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); + + if (nr_sects > max_write_same_sectors) { + bio->bi_iter.bi_size = max_write_same_sectors << 9; + nr_sects -= max_write_same_sectors; + sector += max_write_same_sectors; + } else { + bio->bi_iter.bi_size = nr_sects << 9; + nr_sects = 0; + } + + atomic_inc(&bb.done); + submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio); + } + + /* Wait for bios in-flight */ + if (!atomic_dec_and_test(&bb.done)) + wait_for_completion_io(&wait); + + if (!test_bit(BIO_UPTODATE, &bb.flags)) + ret = -ENOTSUPP; + + return ret; +} +EXPORT_SYMBOL(blkdev_issue_write_same); + +/** + * blkdev_issue_zeroout - generate number of zero filed write bios + * @bdev: blockdev to issue + * @sector: start sector + * @nr_sects: number of sectors to write + * @gfp_mask: memory allocation flags (for bio_alloc) + * + * Description: + * Generate and issue number of bios with zerofiled pages. + */ + +static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask) +{ + int ret; + struct bio *bio; + struct bio_batch bb; + unsigned int sz; + DECLARE_COMPLETION_ONSTACK(wait); + + atomic_set(&bb.done, 1); + bb.flags = 1 << BIO_UPTODATE; + bb.wait = &wait; + + ret = 0; + while (nr_sects != 0) { + bio = bio_alloc(gfp_mask, + min(nr_sects, (sector_t)BIO_MAX_PAGES)); + if (!bio) { + ret = -ENOMEM; + break; + } + + bio->bi_iter.bi_sector = sector; + bio->bi_bdev = bdev; + bio->bi_end_io = bio_batch_end_io; + bio->bi_private = &bb; + + while (nr_sects != 0) { + sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects); + ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0); + nr_sects -= ret >> 9; + sector += ret >> 9; + if (ret < (sz << 9)) + break; + } + ret = 0; + atomic_inc(&bb.done); + submit_bio(WRITE, bio); + } + + /* Wait for bios in-flight */ + if (!atomic_dec_and_test(&bb.done)) + wait_for_completion_io(&wait); + + if (!test_bit(BIO_UPTODATE, &bb.flags)) + /* One of bios in the batch was completed with error.*/ + ret = -EIO; + + return ret; +} + +/** + * blkdev_issue_zeroout - zero-fill a block range + * @bdev: blockdev to write + * @sector: start sector + * @nr_sects: number of sectors to write + * @gfp_mask: memory allocation flags (for bio_alloc) + * @discard: whether to discard the block range + * + * Description: + * Zero-fill a block range. If the discard flag is set and the block + * device guarantees that subsequent READ operations to the block range + * in question will return zeroes, the blocks will be discarded. 
Should + * the discard request fail, if the discard flag is not set, or if + * discard_zeroes_data is not supported, this function will resort to + * zeroing the blocks manually, thus provisioning (allocating, + * anchoring) them. If the block device supports the WRITE SAME command + * blkdev_issue_zeroout() will use it to optimize the process of + * clearing the block range. Otherwise the zeroing will be performed + * using regular WRITE calls. + */ + +int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, bool discard) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data && + blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0) + return 0; + + if (bdev_write_same(bdev) && + blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, + ZERO_PAGE(0)) == 0) + return 0; + + return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask); +} +EXPORT_SYMBOL(blkdev_issue_zeroout); diff --git a/block/blk-map.c b/block/blk-map.c new file mode 100644 index 000000000..da310a105 --- /dev/null +++ b/block/blk-map.c @@ -0,0 +1,220 @@ +/* + * Functions related to mapping data to requests + */ +#include +#include +#include +#include +#include + +#include "blk.h" + +int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + if (!rq->bio) + blk_rq_bio_prep(q, rq, bio); + else if (!ll_back_merge_fn(q, rq, bio)) + return -EINVAL; + else { + rq->biotail->bi_next = bio; + rq->biotail = bio; + + rq->__data_len += bio->bi_iter.bi_size; + } + return 0; +} + +static int __blk_rq_unmap_user(struct bio *bio) +{ + int ret = 0; + + if (bio) { + if (bio_flagged(bio, BIO_USER_MAPPED)) + bio_unmap_user(bio); + else + ret = bio_uncopy_user(bio); + } + + return ret; +} + +/** + * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request to map data to + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @iter: iovec iterator + * @gfp_mask: memory allocation flags + * + * Description: + * Data will be mapped directly for zero copy I/O, if possible. Otherwise + * a kernel bounce buffer is used. + * + * A matching blk_rq_unmap_user() must be issued at the end of I/O, while + * still in process context. + * + * Note: The mapped bio may need to be bounced through blk_queue_bounce() + * before being submitted to the device, as pages mapped may be out of + * reach. It's the callers responsibility to make sure this happens. The + * original bio must be passed back in to blk_rq_unmap_user() for proper + * unmapping. 
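+ *
+ *    Illustrative pattern (sketch only; error handling elided):
+ *
+ *        blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
+ *        bio = rq->bio;
+ *        blk_execute_rq(q, bd_disk, rq, 0);
+ *        blk_rq_unmap_user(bio);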
+ */ +int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, + struct rq_map_data *map_data, + const struct iov_iter *iter, gfp_t gfp_mask) +{ + struct bio *bio; + int unaligned = 0; + struct iov_iter i; + struct iovec iov; + + if (!iter || !iter->count) + return -EINVAL; + + iov_for_each(iov, i, *iter) { + unsigned long uaddr = (unsigned long) iov.iov_base; + + if (!iov.iov_len) + return -EINVAL; + + /* + * Keep going so we check length of all segments + */ + if (uaddr & queue_dma_alignment(q)) + unaligned = 1; + } + + if (unaligned || (q->dma_pad_mask & iter->count) || map_data) + bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); + else + bio = bio_map_user_iov(q, iter, gfp_mask); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (map_data && map_data->null_mapped) + bio->bi_flags |= (1 << BIO_NULL_MAPPED); + + if (bio->bi_iter.bi_size != iter->count) { + /* + * Grab an extra reference to this bio, as bio_unmap_user() + * expects to be able to drop it twice as it happens on the + * normal IO completion path + */ + bio_get(bio); + bio_endio(bio, 0); + __blk_rq_unmap_user(bio); + return -EINVAL; + } + + if (!bio_flagged(bio, BIO_USER_MAPPED)) + rq->cmd_flags |= REQ_COPY_USER; + + blk_queue_bounce(q, &bio); + bio_get(bio); + blk_rq_bio_prep(q, rq, bio); + return 0; +} +EXPORT_SYMBOL(blk_rq_map_user_iov); + +int blk_rq_map_user(struct request_queue *q, struct request *rq, + struct rq_map_data *map_data, void __user *ubuf, + unsigned long len, gfp_t gfp_mask) +{ + struct iovec iov; + struct iov_iter i; + int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i); + + if (unlikely(ret < 0)) + return ret; + + return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); +} +EXPORT_SYMBOL(blk_rq_map_user); + +/** + * blk_rq_unmap_user - unmap a request with user data + * @bio: start of bio list + * + * Description: + * Unmap a rq previously mapped by blk_rq_map_user(). The caller must + * supply the original rq->bio from the blk_rq_map_user() return, since + * the I/O completion may have changed rq->bio. + */ +int blk_rq_unmap_user(struct bio *bio) +{ + struct bio *mapped_bio; + int ret = 0, ret2; + + while (bio) { + mapped_bio = bio; + if (unlikely(bio_flagged(bio, BIO_BOUNCED))) + mapped_bio = bio->bi_private; + + ret2 = __blk_rq_unmap_user(mapped_bio); + if (ret2 && !ret) + ret = ret2; + + mapped_bio = bio; + bio = bio->bi_next; + bio_put(mapped_bio); + } + + return ret; +} +EXPORT_SYMBOL(blk_rq_unmap_user); + +/** + * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request to fill + * @kbuf: the kernel buffer + * @len: length of user data + * @gfp_mask: memory allocation flags + * + * Description: + * Data will be mapped directly if possible. Otherwise a bounce + * buffer is used. Can be called multiple times to append multiple + * buffers. 
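+ *
+ *    Illustrative use for a driver-generated command (sketch only;
+ *    @buf is a kernel buffer owned by the caller):
+ *
+ *        rq = blk_get_request(q, READ, GFP_KERNEL);
+ *        blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
+ *        blk_execute_rq(q, disk, rq, 0);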
+ */ +int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, + unsigned int len, gfp_t gfp_mask) +{ + int reading = rq_data_dir(rq) == READ; + unsigned long addr = (unsigned long) kbuf; + int do_copy = 0; + struct bio *bio; + int ret; + + if (len > (queue_max_hw_sectors(q) << 9)) + return -EINVAL; + if (!len || !kbuf) + return -EINVAL; + + do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); + if (do_copy) + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + else + bio = bio_map_kern(q, kbuf, len, gfp_mask); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (!reading) + bio->bi_rw |= REQ_WRITE; + + if (do_copy) + rq->cmd_flags |= REQ_COPY_USER; + + ret = blk_rq_append_bio(q, rq, bio); + if (unlikely(ret)) { + /* request is too big */ + bio_put(bio); + return ret; + } + + blk_queue_bounce(q, &rq->bio); + return 0; +} +EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/block/blk-merge.c b/block/blk-merge.c new file mode 100644 index 000000000..fd3fee81c --- /dev/null +++ b/block/blk-merge.c @@ -0,0 +1,610 @@ +/* + * Functions related to segment and merge handling + */ +#include +#include +#include +#include +#include + +#include "blk.h" + +static unsigned int __blk_recalc_rq_segments(struct request_queue *q, + struct bio *bio, + bool no_sg_merge) +{ + struct bio_vec bv, bvprv = { NULL }; + int cluster, high, highprv = 1; + unsigned int seg_size, nr_phys_segs; + struct bio *fbio, *bbio; + struct bvec_iter iter; + + if (!bio) + return 0; + + /* + * This should probably be returning 0, but blk_add_request_payload() + * (Christoph!!!!) + */ + if (bio->bi_rw & REQ_DISCARD) + return 1; + + if (bio->bi_rw & REQ_WRITE_SAME) + return 1; + + fbio = bio; + cluster = blk_queue_cluster(q); + seg_size = 0; + nr_phys_segs = 0; + high = 0; + for_each_bio(bio) { + bio_for_each_segment(bv, bio, iter) { + /* + * If SG merging is disabled, each bio vector is + * a segment + */ + if (no_sg_merge) + goto new_segment; + + /* + * the trick here is making sure that a high page is + * never considered part of another segment, since + * that might change with the bounce page. 
+ */ + high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); + if (!high && !highprv && cluster) { + if (seg_size + bv.bv_len + > queue_max_segment_size(q)) + goto new_segment; + if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) + goto new_segment; + if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) + goto new_segment; + + seg_size += bv.bv_len; + bvprv = bv; + continue; + } +new_segment: + if (nr_phys_segs == 1 && seg_size > + fbio->bi_seg_front_size) + fbio->bi_seg_front_size = seg_size; + + nr_phys_segs++; + bvprv = bv; + seg_size = bv.bv_len; + highprv = high; + } + bbio = bio; + } + + if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size) + fbio->bi_seg_front_size = seg_size; + if (seg_size > bbio->bi_seg_back_size) + bbio->bi_seg_back_size = seg_size; + + return nr_phys_segs; +} + +void blk_recalc_rq_segments(struct request *rq) +{ + bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE, + &rq->q->queue_flags); + + rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, + no_sg_merge); +} + +void blk_recount_segments(struct request_queue *q, struct bio *bio) +{ + unsigned short seg_cnt; + + /* estimate segment number by bi_vcnt for non-cloned bio */ + if (bio_flagged(bio, BIO_CLONED)) + seg_cnt = bio_segments(bio); + else + seg_cnt = bio->bi_vcnt; + + if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && + (seg_cnt < queue_max_segments(q))) + bio->bi_phys_segments = seg_cnt; + else { + struct bio *nxt = bio->bi_next; + + bio->bi_next = NULL; + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); + bio->bi_next = nxt; + } + + bio->bi_flags |= (1 << BIO_SEG_VALID); +} +EXPORT_SYMBOL(blk_recount_segments); + +static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, + struct bio *nxt) +{ + struct bio_vec end_bv = { NULL }, nxt_bv; + struct bvec_iter iter; + + if (!blk_queue_cluster(q)) + return 0; + + if (bio->bi_seg_back_size + nxt->bi_seg_front_size > + queue_max_segment_size(q)) + return 0; + + if (!bio_has_data(bio)) + return 1; + + bio_for_each_segment(end_bv, bio, iter) + if (end_bv.bv_len == iter.bi_size) + break; + + nxt_bv = bio_iovec(nxt); + + if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv)) + return 0; + + /* + * bio and nxt are contiguous in memory; check if the queue allows + * these two to be merged into one + */ + if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv)) + return 1; + + return 0; +} + +static inline void +__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, + struct scatterlist *sglist, struct bio_vec *bvprv, + struct scatterlist **sg, int *nsegs, int *cluster) +{ + + int nbytes = bvec->bv_len; + + if (*sg && *cluster) { + if ((*sg)->length + nbytes > queue_max_segment_size(q)) + goto new_segment; + + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) + goto new_segment; + if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) + goto new_segment; + + (*sg)->length += nbytes; + } else { +new_segment: + if (!*sg) + *sg = sglist; + else { + /* + * If the driver previously mapped a shorter + * list, we could see a termination bit + * prematurely unless it fully inits the sg + * table on each mapping. We KNOW that there + * must be more entries here or the driver + * would be buggy, so force clear the + * termination bit to avoid doing a full + * sg_init_table() in drivers for each command. 
+ */ + sg_unmark_end(*sg); + *sg = sg_next(*sg); + } + + sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); + (*nsegs)++; + } + *bvprv = *bvec; +} + +static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist, + struct scatterlist **sg) +{ + struct bio_vec bvec, bvprv = { NULL }; + struct bvec_iter iter; + int nsegs, cluster; + + nsegs = 0; + cluster = blk_queue_cluster(q); + + if (bio->bi_rw & REQ_DISCARD) { + /* + * This is a hack - drivers should be neither modifying the + * biovec, nor relying on bi_vcnt - but because of + * blk_add_request_payload(), a discard bio may or may not have + * a payload we need to set up here (thank you Christoph) and + * bi_vcnt is really the only way of telling if we need to. + */ + + if (bio->bi_vcnt) + goto single_segment; + + return 0; + } + + if (bio->bi_rw & REQ_WRITE_SAME) { +single_segment: + *sg = sglist; + bvec = bio_iovec(bio); + sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); + return 1; + } + + for_each_bio(bio) + bio_for_each_segment(bvec, bio, iter) + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, + &nsegs, &cluster); + + return nsegs; +} + +/* + * map a request to scatterlist, return number of sg entries setup. Caller + * must make sure sg can hold rq->nr_phys_segments entries + */ +int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *sg = NULL; + int nsegs = 0; + + if (rq->bio) + nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); + + if (unlikely(rq->cmd_flags & REQ_COPY_USER) && + (blk_rq_bytes(rq) & q->dma_pad_mask)) { + unsigned int pad_len = + (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + + sg->length += pad_len; + rq->extra_len += pad_len; + } + + if (q->dma_drain_size && q->dma_drain_needed(rq)) { + if (rq->cmd_flags & REQ_WRITE) + memset(q->dma_drain_buffer, 0, q->dma_drain_size); + + sg->page_link &= ~0x02; + sg = sg_next(sg); + sg_set_page(sg, virt_to_page(q->dma_drain_buffer), + q->dma_drain_size, + ((unsigned long)q->dma_drain_buffer) & + (PAGE_SIZE - 1)); + nsegs++; + rq->extra_len += q->dma_drain_size; + } + + if (sg) + sg_mark_end(sg); + + return nsegs; +} +EXPORT_SYMBOL(blk_rq_map_sg); + +static inline int ll_new_hw_segment(struct request_queue *q, + struct request *req, + struct bio *bio) +{ + int nr_phys_segs = bio_phys_segments(q, bio); + + if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) + goto no_merge; + + if (blk_integrity_merge_bio(q, req, bio) == false) + goto no_merge; + + /* + * This will form the start of a new hw segment. Bump both + * counters. 
+ */ + req->nr_phys_segments += nr_phys_segs; + return 1; + +no_merge: + req->cmd_flags |= REQ_NOMERGE; + if (req == q->last_merge) + q->last_merge = NULL; + return 0; +} + +int ll_back_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio) +{ + if (blk_rq_sectors(req) + bio_sectors(bio) > + blk_rq_get_max_sectors(req)) { + req->cmd_flags |= REQ_NOMERGE; + if (req == q->last_merge) + q->last_merge = NULL; + return 0; + } + if (!bio_flagged(req->biotail, BIO_SEG_VALID)) + blk_recount_segments(q, req->biotail); + if (!bio_flagged(bio, BIO_SEG_VALID)) + blk_recount_segments(q, bio); + + return ll_new_hw_segment(q, req, bio); +} + +int ll_front_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio) +{ + if (blk_rq_sectors(req) + bio_sectors(bio) > + blk_rq_get_max_sectors(req)) { + req->cmd_flags |= REQ_NOMERGE; + if (req == q->last_merge) + q->last_merge = NULL; + return 0; + } + if (!bio_flagged(bio, BIO_SEG_VALID)) + blk_recount_segments(q, bio); + if (!bio_flagged(req->bio, BIO_SEG_VALID)) + blk_recount_segments(q, req->bio); + + return ll_new_hw_segment(q, req, bio); +} + +/* + * blk-mq uses req->special to carry normal driver per-request payload, it + * does not indicate a prepared command that we cannot merge with. + */ +static bool req_no_special_merge(struct request *req) +{ + struct request_queue *q = req->q; + + return !q->mq_ops && req->special; +} + +static int req_gap_to_prev(struct request *req, struct request *next) +{ + struct bio *prev = req->biotail; + + return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1], + next->bio->bi_io_vec[0].bv_offset); +} + +static int ll_merge_requests_fn(struct request_queue *q, struct request *req, + struct request *next) +{ + int total_phys_segments; + unsigned int seg_size = + req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; + + /* + * First check if the either of the requests are re-queued + * requests. Can't merge them if they are. + */ + if (req_no_special_merge(req) || req_no_special_merge(next)) + return 0; + + if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) && + req_gap_to_prev(req, next)) + return 0; + + /* + * Will it become too large? + */ + if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > + blk_rq_get_max_sectors(req)) + return 0; + + total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; + if (blk_phys_contig_segment(q, req->biotail, next->bio)) { + if (req->nr_phys_segments == 1) + req->bio->bi_seg_front_size = seg_size; + if (next->nr_phys_segments == 1) + next->biotail->bi_seg_back_size = seg_size; + total_phys_segments--; + } + + if (total_phys_segments > queue_max_segments(q)) + return 0; + + if (blk_integrity_merge_rq(q, req, next) == false) + return 0; + + /* Merge is OK... */ + req->nr_phys_segments = total_phys_segments; + return 1; +} + +/** + * blk_rq_set_mixed_merge - mark a request as mixed merge + * @rq: request to mark as mixed merge + * + * Description: + * @rq is about to be mixed merged. Make sure the attributes + * which can be mixed are set in each bio and mark @rq as mixed + * merged. + */ +void blk_rq_set_mixed_merge(struct request *rq) +{ + unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; + struct bio *bio; + + if (rq->cmd_flags & REQ_MIXED_MERGE) + return; + + /* + * @rq will no longer represent mixable attributes for all the + * contained bios. It will just track those of the first one. + * Distributes the attributs to each bio. 
+ */ + for (bio = rq->bio; bio; bio = bio->bi_next) { + WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) && + (bio->bi_rw & REQ_FAILFAST_MASK) != ff); + bio->bi_rw |= ff; + } + rq->cmd_flags |= REQ_MIXED_MERGE; +} + +static void blk_account_io_merge(struct request *req) +{ + if (blk_do_io_stat(req)) { + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = req->part; + + part_round_stats(cpu, part); + part_dec_in_flight(part, rq_data_dir(req)); + + hd_struct_put(part); + part_stat_unlock(); + } +} + +/* + * Has to be called with the request spinlock acquired + */ +static int attempt_merge(struct request_queue *q, struct request *req, + struct request *next) +{ + if (!rq_mergeable(req) || !rq_mergeable(next)) + return 0; + + if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags)) + return 0; + + /* + * not contiguous + */ + if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) + return 0; + + if (rq_data_dir(req) != rq_data_dir(next) + || req->rq_disk != next->rq_disk + || req_no_special_merge(next)) + return 0; + + if (req->cmd_flags & REQ_WRITE_SAME && + !blk_write_same_mergeable(req->bio, next->bio)) + return 0; + + /* + * If we are allowed to merge, then append bio list + * from next to rq and release next. merge_requests_fn + * will have updated segment counts, update sector + * counts here. + */ + if (!ll_merge_requests_fn(q, req, next)) + return 0; + + /* + * If failfast settings disagree or any of the two is already + * a mixed merge, mark both as mixed before proceeding. This + * makes sure that all involved bios have mixable attributes + * set properly. + */ + if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE || + (req->cmd_flags & REQ_FAILFAST_MASK) != + (next->cmd_flags & REQ_FAILFAST_MASK)) { + blk_rq_set_mixed_merge(req); + blk_rq_set_mixed_merge(next); + } + + /* + * At this point we have either done a back merge + * or front merge. We need the smaller start_time of + * the merged requests to be the current request + * for accounting purposes. 
+ */ + if (time_after(req->start_time, next->start_time)) + req->start_time = next->start_time; + + req->biotail->bi_next = next->bio; + req->biotail = next->biotail; + + req->__data_len += blk_rq_bytes(next); + + elv_merge_requests(q, req, next); + + /* + * 'next' is going away, so update stats accordingly + */ + blk_account_io_merge(next); + + req->ioprio = ioprio_best(req->ioprio, next->ioprio); + if (blk_rq_cpu_valid(next)) + req->cpu = next->cpu; + + /* owner-ship of bio passed from next to req */ + next->bio = NULL; + __blk_put_request(q, next); + return 1; +} + +int attempt_back_merge(struct request_queue *q, struct request *rq) +{ + struct request *next = elv_latter_request(q, rq); + + if (next) + return attempt_merge(q, rq, next); + + return 0; +} + +int attempt_front_merge(struct request_queue *q, struct request *rq) +{ + struct request *prev = elv_former_request(q, rq); + + if (prev) + return attempt_merge(q, prev, rq); + + return 0; +} + +int blk_attempt_req_merge(struct request_queue *q, struct request *rq, + struct request *next) +{ + return attempt_merge(q, rq, next); +} + +bool blk_rq_merge_ok(struct request *rq, struct bio *bio) +{ + struct request_queue *q = rq->q; + + if (!rq_mergeable(rq) || !bio_mergeable(bio)) + return false; + + if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) + return false; + + /* different data direction or already started, don't merge */ + if (bio_data_dir(bio) != rq_data_dir(rq)) + return false; + + /* must be same device and not a special request */ + if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) + return false; + + /* only merge integrity protected bio into ditto rq */ + if (blk_integrity_merge_bio(rq->q, rq, bio) == false) + return false; + + /* must be using the same buffer */ + if (rq->cmd_flags & REQ_WRITE_SAME && + !blk_write_same_mergeable(rq->bio, bio)) + return false; + + if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { + struct bio_vec *bprev; + + bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1]; + if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) + return false; + } + + return true; +} + +int blk_try_merge(struct request *rq, struct bio *bio) +{ + if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) + return ELEVATOR_BACK_MERGE; + else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) + return ELEVATOR_FRONT_MERGE; + return ELEVATOR_NO_MERGE; +} diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c new file mode 100644 index 000000000..bb3ed488f --- /dev/null +++ b/block/blk-mq-cpu.c @@ -0,0 +1,67 @@ +/* + * CPU notifier helper code for blk-mq + * + * Copyright (C) 2013-2014 Jens Axboe + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "blk-mq.h" + +static LIST_HEAD(blk_mq_cpu_notify_list); +static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); + +static int blk_mq_main_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long) hcpu; + struct blk_mq_cpu_notifier *notify; + int ret = NOTIFY_OK; + + raw_spin_lock(&blk_mq_cpu_notify_lock); + + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { + ret = notify->notify(notify->data, action, cpu); + if (ret != NOTIFY_OK) + break; + } + + raw_spin_unlock(&blk_mq_cpu_notify_lock); + return ret; +} + +void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) +{ + BUG_ON(!notifier->notify); + + raw_spin_lock(&blk_mq_cpu_notify_lock); + list_add_tail(¬ifier->list, 
&blk_mq_cpu_notify_list); + raw_spin_unlock(&blk_mq_cpu_notify_lock); +} + +void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) +{ + raw_spin_lock(&blk_mq_cpu_notify_lock); + list_del(¬ifier->list); + raw_spin_unlock(&blk_mq_cpu_notify_lock); +} + +void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, + int (*fn)(void *, unsigned long, unsigned int), + void *data) +{ + notifier->notify = fn; + notifier->data = data; +} + +void __init blk_mq_cpu_init(void) +{ + hotcpu_notifier(blk_mq_main_cpu_notify, 0); +} diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c new file mode 100644 index 000000000..5f13f4d0b --- /dev/null +++ b/block/blk-mq-cpumap.c @@ -0,0 +1,119 @@ +/* + * CPU <-> hardware queue mapping helpers + * + * Copyright (C) 2013-2014 Jens Axboe + */ +#include +#include +#include +#include +#include +#include + +#include +#include "blk.h" +#include "blk-mq.h" + +static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, + const int cpu) +{ + return cpu * nr_queues / nr_cpus; +} + +static int get_first_sibling(unsigned int cpu) +{ + unsigned int ret; + + ret = cpumask_first(topology_thread_cpumask(cpu)); + if (ret < nr_cpu_ids) + return ret; + + return cpu; +} + +int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) +{ + unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; + cpumask_var_t cpus; + + if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) + return 1; + + cpumask_clear(cpus); + nr_cpus = nr_uniq_cpus = 0; + for_each_online_cpu(i) { + nr_cpus++; + first_sibling = get_first_sibling(i); + if (!cpumask_test_cpu(first_sibling, cpus)) + nr_uniq_cpus++; + cpumask_set_cpu(i, cpus); + } + + queue = 0; + for_each_possible_cpu(i) { + if (!cpu_online(i)) { + map[i] = 0; + continue; + } + + /* + * Easy case - we have equal or more hardware queues. Or + * there are no thread siblings to take into account. Do + * 1:1 if enough, or sequential mapping if less. + */ + if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) { + map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); + queue++; + continue; + } + + /* + * Less then nr_cpus queues, and we have some number of + * threads per cores. Map sibling threads to the same + * queue. + */ + first_sibling = get_first_sibling(i); + if (first_sibling == i) { + map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues, + queue); + queue++; + } else + map[i] = map[first_sibling]; + } + + free_cpumask_var(cpus); + return 0; +} + +unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) +{ + unsigned int *map; + + /* If cpus are offline, map them to first hctx */ + map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, + set->numa_node); + if (!map) + return NULL; + + if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) + return map; + + kfree(map); + return NULL; +} + +/* + * We have no quick way of doing reverse lookups. This is only used at + * queue init time, so runtime isn't important. 
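 *
 * A worked example, assuming 8 online CPUs with no shared thread
 * siblings and 2 hardware queues: the forward map built above by
 * cpu_to_queue_index() comes out as
 *
 *	map[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
 *
 * and blk_mq_hw_queue_to_node(map, 1) simply scans until it hits CPU 4,
 * the first CPU mapped to queue 1, and returns that CPU's NUMA node.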
+ */ +int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) +{ + int i; + + for_each_possible_cpu(i) { + if (index == mq_map[i]) + return cpu_to_node(i); + } + + return NUMA_NO_NODE; +} diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c new file mode 100644 index 000000000..b79685e06 --- /dev/null +++ b/block/blk-mq-sysfs.c @@ -0,0 +1,462 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "blk-mq.h" +#include "blk-mq-tag.h" + +static void blk_mq_sysfs_release(struct kobject *kobj) +{ +} + +struct blk_mq_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_ctx *, char *); + ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t); +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); + ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t); +}; + +static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct blk_mq_ctx_sysfs_entry *entry; + struct blk_mq_ctx *ctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); + ctx = container_of(kobj, struct blk_mq_ctx, kobj); + q = ctx->queue; + + if (!entry->show) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->show(ctx, page); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct blk_mq_ctx_sysfs_entry *entry; + struct blk_mq_ctx *ctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); + ctx = container_of(kobj, struct blk_mq_ctx, kobj); + q = ctx->queue; + + if (!entry->store) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->store(ctx, page, length); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *page) +{ + struct blk_mq_hw_ctx_sysfs_entry *entry; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); + hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); + q = hctx->queue; + + if (!entry->show) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->show(hctx, page); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj, + struct attribute *attr, const char *page, + size_t length) +{ + struct blk_mq_hw_ctx_sysfs_entry *entry; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); + hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); + q = hctx->queue; + + if (!entry->store) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->store(hctx, page, length); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) +{ + return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1], + ctx->rq_dispatched[0]); +} + +static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page) +{ + return sprintf(page, "%lu\n", ctx->rq_merged); +} + +static ssize_t 
blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) +{ + return sprintf(page, "%lu %lu\n", ctx->rq_completed[1], + ctx->rq_completed[0]); +} + +static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg) +{ + char *start_page = page; + struct request *rq; + + page += sprintf(page, "%s:\n", msg); + + list_for_each_entry(rq, list, queuelist) + page += sprintf(page, "\t%p\n", rq); + + return page - start_page; +} + +static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) +{ + ssize_t ret; + + spin_lock(&ctx->lock); + ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending"); + spin_unlock(&ctx->lock); + + return ret; +} + +static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx, + char *page) +{ + return sprintf(page, "%lu\n", hctx->queued); +} + +static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return sprintf(page, "%lu\n", hctx->run); +} + +static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx, + char *page) +{ + char *start_page = page; + int i; + + page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]); + + for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) { + unsigned long d = 1U << (i - 1); + + page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]); + } + + return page - start_page; +} + +static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, + char *page) +{ + ssize_t ret; + + spin_lock(&hctx->lock); + ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending"); + spin_unlock(&hctx->lock); + + return ret; +} + +static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return blk_mq_tag_sysfs_show(hctx->tags, page); +} + +static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return sprintf(page, "%u\n", atomic_read(&hctx->nr_active)); +} + +static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + unsigned int i, first = 1; + ssize_t ret = 0; + + blk_mq_disable_hotplug(); + + for_each_cpu(i, hctx->cpumask) { + if (first) + ret += sprintf(ret + page, "%u", i); + else + ret += sprintf(ret + page, ", %u", i); + + first = 0; + } + + blk_mq_enable_hotplug(); + + ret += sprintf(ret + page, "\n"); + return ret; +} + +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = { + .attr = {.name = "dispatched", .mode = S_IRUGO }, + .show = blk_mq_sysfs_dispatched_show, +}; +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = { + .attr = {.name = "merged", .mode = S_IRUGO }, + .show = blk_mq_sysfs_merged_show, +}; +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = { + .attr = {.name = "completed", .mode = S_IRUGO }, + .show = blk_mq_sysfs_completed_show, +}; +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = { + .attr = {.name = "rq_list", .mode = S_IRUGO }, + .show = blk_mq_sysfs_rq_list_show, +}; + +static struct attribute *default_ctx_attrs[] = { + &blk_mq_sysfs_dispatched.attr, + &blk_mq_sysfs_merged.attr, + &blk_mq_sysfs_completed.attr, + &blk_mq_sysfs_rq_list.attr, + NULL, +}; + +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = { + .attr = {.name = "queued", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_queued_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = { + .attr = {.name = "run", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_run_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { + .attr = {.name = "dispatched", .mode = S_IRUGO }, + .show = 
blk_mq_hw_sysfs_dispatched_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = { + .attr = {.name = "active", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_active_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { + .attr = {.name = "pending", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_rq_list_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { + .attr = {.name = "tags", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_tags_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = { + .attr = {.name = "cpu_list", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_cpus_show, +}; + +static struct attribute *default_hw_ctx_attrs[] = { + &blk_mq_hw_sysfs_queued.attr, + &blk_mq_hw_sysfs_run.attr, + &blk_mq_hw_sysfs_dispatched.attr, + &blk_mq_hw_sysfs_pending.attr, + &blk_mq_hw_sysfs_tags.attr, + &blk_mq_hw_sysfs_cpus.attr, + &blk_mq_hw_sysfs_active.attr, + NULL, +}; + +static const struct sysfs_ops blk_mq_sysfs_ops = { + .show = blk_mq_sysfs_show, + .store = blk_mq_sysfs_store, +}; + +static const struct sysfs_ops blk_mq_hw_sysfs_ops = { + .show = blk_mq_hw_sysfs_show, + .store = blk_mq_hw_sysfs_store, +}; + +static struct kobj_type blk_mq_ktype = { + .sysfs_ops = &blk_mq_sysfs_ops, + .release = blk_mq_sysfs_release, +}; + +static struct kobj_type blk_mq_ctx_ktype = { + .sysfs_ops = &blk_mq_sysfs_ops, + .default_attrs = default_ctx_attrs, + .release = blk_mq_sysfs_release, +}; + +static struct kobj_type blk_mq_hw_ktype = { + .sysfs_ops = &blk_mq_hw_sysfs_ops, + .default_attrs = default_hw_ctx_attrs, + .release = blk_mq_sysfs_release, +}; + +static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_ctx *ctx; + int i; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return; + + hctx_for_each_ctx(hctx, ctx, i) + kobject_del(&ctx->kobj); + + kobject_del(&hctx->kobj); +} + +static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + int i, ret; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return 0; + + ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); + if (ret) + return ret; + + hctx_for_each_ctx(hctx, ctx, i) { + ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); + if (ret) + break; + } + + return ret; +} + +void blk_mq_unregister_disk(struct gendisk *disk) +{ + struct request_queue *q = disk->queue; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i, j; + + queue_for_each_hw_ctx(q, hctx, i) { + blk_mq_unregister_hctx(hctx); + + hctx_for_each_ctx(hctx, ctx, j) + kobject_put(&ctx->kobj); + + kobject_put(&hctx->kobj); + } + + kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); + kobject_del(&q->mq_kobj); + kobject_put(&q->mq_kobj); + + kobject_put(&disk_to_dev(disk)->kobj); +} + +static void blk_mq_sysfs_init(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i; + + kobject_init(&q->mq_kobj, &blk_mq_ktype); + + queue_for_each_hw_ctx(q, hctx, i) + kobject_init(&hctx->kobj, &blk_mq_hw_ktype); + + queue_for_each_ctx(q, ctx, i) + kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); +} + +/* see blk_register_queue() */ +void blk_mq_finish_init(struct request_queue *q) +{ + percpu_ref_switch_to_percpu(&q->mq_usage_counter); +} + +int blk_mq_register_disk(struct gendisk *disk) +{ + struct device *dev = disk_to_dev(disk); + struct request_queue *q = disk->queue; + struct blk_mq_hw_ctx *hctx; + int ret, i; + + blk_mq_sysfs_init(q); + + 
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); + if (ret < 0) + return ret; + + kobject_uevent(&q->mq_kobj, KOBJ_ADD); + + queue_for_each_hw_ctx(q, hctx, i) { + hctx->flags |= BLK_MQ_F_SYSFS_UP; + ret = blk_mq_register_hctx(hctx); + if (ret) + break; + } + + if (ret) { + blk_mq_unregister_disk(disk); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(blk_mq_register_disk); + +void blk_mq_sysfs_unregister(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_unregister_hctx(hctx); +} + +int blk_mq_sysfs_register(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i, ret = 0; + + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_register_hctx(hctx); + if (ret) + break; + } + + return ret; +} diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c new file mode 100644 index 000000000..be3290cc0 --- /dev/null +++ b/block/blk-mq-tag.c @@ -0,0 +1,666 @@ +/* + * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread + * over multiple cachelines to avoid ping-pong between multiple submitters + * or submitter and completer. Uses rolling wakeups to avoid falling of + * the scaling cliff when we run out of tags and have to start putting + * submitters to sleep. + * + * Uses active queue tracking to support fairer distribution of tags + * between multiple submitters when a shared tag map is used. + * + * Copyright (C) 2013-2014 Jens Axboe + */ +#include +#include +#include + +#include +#include "blk.h" +#include "blk-mq.h" +#include "blk-mq-tag.h" + +static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) +{ + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_align_bitmap *bm = &bt->map[i]; + int ret; + + ret = find_first_zero_bit(&bm->word, bm->depth); + if (ret < bm->depth) + return true; + } + + return false; +} + +bool blk_mq_has_free_tags(struct blk_mq_tags *tags) +{ + if (!tags) + return true; + + return bt_has_free_tags(&tags->bitmap_tags); +} + +static inline int bt_index_inc(int index) +{ + return (index + 1) & (BT_WAIT_QUEUES - 1); +} + +static inline void bt_index_atomic_inc(atomic_t *index) +{ + int old = atomic_read(index); + int new = bt_index_inc(old); + atomic_cmpxchg(index, old, new); +} + +/* + * If a previously inactive queue goes active, bump the active user count. + */ +bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && + !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + atomic_inc(&hctx->tags->active_queues); + + return true; +} + +/* + * Wakeup all potentially sleeping on tags + */ +void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) +{ + struct blk_mq_bitmap_tags *bt; + int i, wake_index; + + bt = &tags->bitmap_tags; + wake_index = atomic_read(&bt->wake_index); + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) + wake_up(&bs->wait); + + wake_index = bt_index_inc(wake_index); + } + + if (include_reserve) { + bt = &tags->breserved_tags; + if (waitqueue_active(&bt->bs[0].wait)) + wake_up(&bt->bs[0].wait); + } +} + +/* + * If a previously busy queue goes inactive, potential waiters could now + * be allowed to queue. Wake them up and check. 
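 *
 * The "check" happens in hctx_may_queue() below, which grants each
 * active shared user roughly
 *
 *	depth = max((bt->depth + users - 1) / users, 4U);
 *
 * tags. For example, 128 tags shared by 3 active queues allow up to 43
 * in-flight requests per queue; once one of them goes idle, the two
 * remaining queues may use up to 64 each, hence the wakeup here.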
+ */ +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_tags *tags = hctx->tags; + + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + + atomic_dec(&tags->active_queues); + + blk_mq_tag_wakeup_all(tags, false); +} + +/* + * For shared tag users, we track the number of currently active users + * and attempt to provide a fair share of the tag depth for each of them. + */ +static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, + struct blk_mq_bitmap_tags *bt) +{ + unsigned int depth, users; + + if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return true; + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return true; + + /* + * Don't try dividing an ant + */ + if (bt->depth == 1) + return true; + + users = atomic_read(&hctx->tags->active_queues); + if (!users) + return true; + + /* + * Allow at least some tags + */ + depth = max((bt->depth + users - 1) / users, 4U); + return atomic_read(&hctx->nr_active) < depth; +} + +static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag, + bool nowrap) +{ + int tag, org_last_tag = last_tag; + + while (1) { + tag = find_next_zero_bit(&bm->word, bm->depth, last_tag); + if (unlikely(tag >= bm->depth)) { + /* + * We started with an offset, and we didn't reset the + * offset to 0 in a failure case, so start from 0 to + * exhaust the map. + */ + if (org_last_tag && last_tag && !nowrap) { + last_tag = org_last_tag = 0; + continue; + } + return -1; + } + + if (!test_and_set_bit(tag, &bm->word)) + break; + + last_tag = tag + 1; + if (last_tag >= bm->depth - 1) + last_tag = 0; + } + + return tag; +} + +#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR) + +/* + * Straight forward bitmap tag implementation, where each bit is a tag + * (cleared == free, and set == busy). The small twist is using per-cpu + * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue + * contexts. This enables us to drastically limit the space searched, + * without dirtying an extra shared cacheline like we would if we stored + * the cache value inside the shared blk_mq_bitmap_tags structure. On top + * of that, each word of tags is in a separate cacheline. This means that + * multiple users will tend to stick to different cachelines, at least + * until the map is exhausted. + */ +static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt, + unsigned int *tag_cache, struct blk_mq_tags *tags) +{ + unsigned int last_tag, org_last_tag; + int index, i, tag; + + if (!hctx_may_queue(hctx, bt)) + return -1; + + last_tag = org_last_tag = *tag_cache; + index = TAG_TO_INDEX(bt, last_tag); + + for (i = 0; i < bt->map_nr; i++) { + tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag), + BT_ALLOC_RR(tags)); + if (tag != -1) { + tag += (index << bt->bits_per_word); + goto done; + } + + /* + * Jump to next index, and reset the last tag to be the + * first tag of that index + */ + index++; + last_tag = (index << bt->bits_per_word); + + if (index >= bt->map_nr) { + index = 0; + last_tag = 0; + } + } + + *tag_cache = 0; + return -1; + + /* + * Only update the cache from the allocation path, if we ended + * up using the specific cached tag. 
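 *
 * As an illustration of the cacheline spreading described above,
 * assuming 64-bit words so that bt->bits_per_word starts at 6: a ctx
 * whose cached last_tag is 70 resumes searching at
 *
 *	index = TAG_TO_INDEX(bt, 70) = 70 >> 6 = 1;
 *	bit   = TAG_TO_BIT(bt, 70)  = 70 & 63 = 6;
 *
 * i.e. in the second map word, which lives in its own
 * struct blk_align_bitmap and therefore its own cacheline.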
+ */ +done: + if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) { + last_tag = tag + 1; + if (last_tag >= bt->depth - 1) + last_tag = 0; + + *tag_cache = last_tag; + } + + return tag; +} + +static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, + struct blk_mq_hw_ctx *hctx) +{ + struct bt_wait_state *bs; + int wait_index; + + if (!hctx) + return &bt->bs[0]; + + wait_index = atomic_read(&hctx->wait_index); + bs = &bt->bs[wait_index]; + bt_index_atomic_inc(&hctx->wait_index); + return bs; +} + +static int bt_get(struct blk_mq_alloc_data *data, + struct blk_mq_bitmap_tags *bt, + struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, struct blk_mq_tags *tags) +{ + struct bt_wait_state *bs; + DEFINE_WAIT(wait); + int tag; + + tag = __bt_get(hctx, bt, last_tag, tags); + if (tag != -1) + return tag; + + if (!(data->gfp & __GFP_WAIT)) + return -1; + + bs = bt_wait_ptr(bt, hctx); + do { + prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); + + tag = __bt_get(hctx, bt, last_tag, tags); + if (tag != -1) + break; + + /* + * We're out of tags on this hardware queue, kick any + * pending IO submits before going to sleep waiting for + * some to complete. Note that hctx can be NULL here for + * reserved tag allocation. + */ + if (hctx) + blk_mq_run_hw_queue(hctx, false); + + /* + * Retry tag allocation after running the hardware queue, + * as running the queue may also have found completions. + */ + tag = __bt_get(hctx, bt, last_tag, tags); + if (tag != -1) + break; + + blk_mq_put_ctx(data->ctx); + + io_schedule(); + + data->ctx = blk_mq_get_ctx(data->q); + data->hctx = data->q->mq_ops->map_queue(data->q, + data->ctx->cpu); + if (data->reserved) { + bt = &data->hctx->tags->breserved_tags; + } else { + last_tag = &data->ctx->last_tag; + hctx = data->hctx; + bt = &hctx->tags->bitmap_tags; + } + finish_wait(&bs->wait, &wait); + bs = bt_wait_ptr(bt, hctx); + } while (1); + + finish_wait(&bs->wait, &wait); + return tag; +} + +static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data) +{ + int tag; + + tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx, + &data->ctx->last_tag, data->hctx->tags); + if (tag >= 0) + return tag + data->hctx->tags->nr_reserved_tags; + + return BLK_MQ_TAG_FAIL; +} + +static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data) +{ + int tag, zero = 0; + + if (unlikely(!data->hctx->tags->nr_reserved_tags)) { + WARN_ON_ONCE(1); + return BLK_MQ_TAG_FAIL; + } + + tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero, + data->hctx->tags); + if (tag < 0) + return BLK_MQ_TAG_FAIL; + + return tag; +} + +unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) +{ + if (!data->reserved) + return __blk_mq_get_tag(data); + + return __blk_mq_get_reserved_tag(data); +} + +static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) +{ + int i, wake_index; + + wake_index = atomic_read(&bt->wake_index); + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) { + int o = atomic_read(&bt->wake_index); + if (wake_index != o) + atomic_cmpxchg(&bt->wake_index, o, wake_index); + + return bs; + } + + wake_index = bt_index_inc(wake_index); + } + + return NULL; +} + +static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) +{ + const int index = TAG_TO_INDEX(bt, tag); + struct bt_wait_state *bs; + int wait_cnt; + + clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word); + + /* Ensure that the wait list checks occur after clear_bit(). 
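 *
 * Concretely, the smp_mb() below orders the clear_bit() store before the
 * waitqueue_active() reads in bt_wake_ptr(), pairing with the barrier
 * implied by prepare_to_wait() on the sleeping side in bt_get(): either
 * the freer sees the sleeper on a wait queue and wakes it, or the
 * sleeper's retry of __bt_get() sees the freed tag.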
*/ + smp_mb(); + + bs = bt_wake_ptr(bt); + if (!bs) + return; + + wait_cnt = atomic_dec_return(&bs->wait_cnt); + if (unlikely(wait_cnt < 0)) + wait_cnt = atomic_inc_return(&bs->wait_cnt); + if (wait_cnt == 0) { + atomic_add(bt->wake_cnt, &bs->wait_cnt); + bt_index_atomic_inc(&bt->wake_index); + wake_up(&bs->wait); + } +} + +void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, + unsigned int *last_tag) +{ + struct blk_mq_tags *tags = hctx->tags; + + if (tag >= tags->nr_reserved_tags) { + const int real_tag = tag - tags->nr_reserved_tags; + + BUG_ON(real_tag >= tags->nr_tags); + bt_clear_tag(&tags->bitmap_tags, real_tag); + if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO)) + *last_tag = real_tag; + } else { + BUG_ON(tag >= tags->nr_reserved_tags); + bt_clear_tag(&tags->breserved_tags, tag); + } +} + +static void bt_for_each(struct blk_mq_hw_ctx *hctx, + struct blk_mq_bitmap_tags *bt, unsigned int off, + busy_iter_fn *fn, void *data, bool reserved) +{ + struct request *rq; + int bit, i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_align_bitmap *bm = &bt->map[i]; + + for (bit = find_first_bit(&bm->word, bm->depth); + bit < bm->depth; + bit = find_next_bit(&bm->word, bm->depth, bit + 1)) { + rq = blk_mq_tag_to_rq(hctx->tags, off + bit); + if (rq->q == hctx->queue) + fn(hctx, rq, data, reserved); + } + + off += (1 << bt->bits_per_word); + } +} + +void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, + void *priv) +{ + struct blk_mq_tags *tags = hctx->tags; + + if (tags->nr_reserved_tags) + bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, + false); +} +EXPORT_SYMBOL(blk_mq_tag_busy_iter); + +static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) +{ + unsigned int i, used; + + for (i = 0, used = 0; i < bt->map_nr; i++) { + struct blk_align_bitmap *bm = &bt->map[i]; + + used += bitmap_weight(&bm->word, bm->depth); + } + + return bt->depth - used; +} + +static void bt_update_count(struct blk_mq_bitmap_tags *bt, + unsigned int depth) +{ + unsigned int tags_per_word = 1U << bt->bits_per_word; + unsigned int map_depth = depth; + + if (depth) { + int i; + + for (i = 0; i < bt->map_nr; i++) { + bt->map[i].depth = min(map_depth, tags_per_word); + map_depth -= bt->map[i].depth; + } + } + + bt->wake_cnt = BT_WAIT_BATCH; + if (bt->wake_cnt > depth / BT_WAIT_QUEUES) + bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES); + + bt->depth = depth; +} + +static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, + int node, bool reserved) +{ + int i; + + bt->bits_per_word = ilog2(BITS_PER_LONG); + + /* + * Depth can be zero for reserved tags, that's not a failure + * condition. + */ + if (depth) { + unsigned int nr, tags_per_word; + + tags_per_word = (1 << bt->bits_per_word); + + /* + * If the tag space is small, shrink the number of tags + * per word so we spread over a few cachelines, at least. + * If less than 4 tags, just forget about it, it's not + * going to work optimally anyway. 
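 *
 * A worked example on a 64-bit build (bits_per_word starts at 6): for
 * depth = 48 the loop below shrinks tags_per_word 64 -> 32 -> 16 -> 8,
 * stopping once 8 * 4 <= 48, so the map ends up as
 *
 *	nr = ALIGN(48, 8) / 8 = 6
 *
 * words of 8 tags each, spread over six cachelines instead of one.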
+ */ + if (depth >= 4) { + while (tags_per_word * 4 > depth) { + bt->bits_per_word--; + tags_per_word = (1 << bt->bits_per_word); + } + } + + nr = ALIGN(depth, tags_per_word) / tags_per_word; + bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap), + GFP_KERNEL, node); + if (!bt->map) + return -ENOMEM; + + bt->map_nr = nr; + } + + bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL); + if (!bt->bs) { + kfree(bt->map); + bt->map = NULL; + return -ENOMEM; + } + + bt_update_count(bt, depth); + + for (i = 0; i < BT_WAIT_QUEUES; i++) { + init_waitqueue_head(&bt->bs[i].wait); + atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt); + } + + return 0; +} + +static void bt_free(struct blk_mq_bitmap_tags *bt) +{ + kfree(bt->map); + kfree(bt->bs); +} + +static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node, int alloc_policy) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + tags->alloc_policy = alloc_policy; + + if (bt_alloc(&tags->bitmap_tags, depth, node, false)) + goto enomem; + if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true)) + goto enomem; + + return tags; +enomem: + bt_free(&tags->bitmap_tags); + kfree(tags); + return NULL; +} + +struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, + unsigned int reserved_tags, + int node, int alloc_policy) +{ + struct blk_mq_tags *tags; + + if (total_tags > BLK_MQ_TAG_MAX) { + pr_err("blk-mq: tag depth too large\n"); + return NULL; + } + + tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node); + if (!tags) + return NULL; + + tags->nr_tags = total_tags; + tags->nr_reserved_tags = reserved_tags; + + return blk_mq_init_bitmap_tags(tags, node, alloc_policy); +} + +void blk_mq_free_tags(struct blk_mq_tags *tags) +{ + bt_free(&tags->bitmap_tags); + bt_free(&tags->breserved_tags); + kfree(tags); +} + +void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + *tag = prandom_u32() % depth; +} + +int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) +{ + tdepth -= tags->nr_reserved_tags; + if (tdepth > tags->nr_tags) + return -EINVAL; + + /* + * Don't need (or can't) update reserved tags here, they remain + * static and should never need resizing. + */ + bt_update_count(&tags->bitmap_tags, tdepth); + blk_mq_tag_wakeup_all(tags, false); + return 0; +} + +/** + * blk_mq_unique_tag() - return a tag that is unique queue-wide + * @rq: request for which to compute a unique tag + * + * The tag field in struct request is unique per hardware queue but not over + * all hardware queues. Hence this function that returns a tag with the + * hardware context index in the upper bits and the per hardware queue tag in + * the lower bits. + * + * Note: When called for a request that is queued on a non-multiqueue request + * queue, the hardware context index is set to zero. 
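 *
 * Example, assuming the 16-bit split declared alongside this helper
 * (BLK_MQ_UNIQUE_TAG_BITS = 16): tag 5 on hardware queue 2 yields
 *
 *	(2 << 16) | 5 = 0x00020005
 *
 * which callers such as SCSI LLDs can take apart again with
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag().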
+ */ +u32 blk_mq_unique_tag(struct request *rq) +{ + struct request_queue *q = rq->q; + struct blk_mq_hw_ctx *hctx; + int hwq = 0; + + if (q->mq_ops) { + hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); + hwq = hctx->queue_num; + } + + return (hwq << BLK_MQ_UNIQUE_TAG_BITS) | + (rq->tag & BLK_MQ_UNIQUE_TAG_MASK); +} +EXPORT_SYMBOL(blk_mq_unique_tag); + +ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) +{ + char *orig_page = page; + unsigned int free, res; + + if (!tags) + return 0; + + page += sprintf(page, "nr_tags=%u, reserved_tags=%u, " + "bits_per_word=%u\n", + tags->nr_tags, tags->nr_reserved_tags, + tags->bitmap_tags.bits_per_word); + + free = bt_unused_tags(&tags->bitmap_tags); + res = bt_unused_tags(&tags->breserved_tags); + + page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res); + page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues)); + + return page - orig_page; +} diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h new file mode 100644 index 000000000..90767b370 --- /dev/null +++ b/block/blk-mq-tag.h @@ -0,0 +1,91 @@ +#ifndef INT_BLK_MQ_TAG_H +#define INT_BLK_MQ_TAG_H + +#include "blk-mq.h" + +enum { + BT_WAIT_QUEUES = 8, + BT_WAIT_BATCH = 8, +}; + +struct bt_wait_state { + atomic_t wait_cnt; + wait_queue_head_t wait; +} ____cacheline_aligned_in_smp; + +#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word) +#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1)) + +struct blk_mq_bitmap_tags { + unsigned int depth; + unsigned int wake_cnt; + unsigned int bits_per_word; + + unsigned int map_nr; + struct blk_align_bitmap *map; + + atomic_t wake_index; + struct bt_wait_state *bs; +}; + +/* + * Tag address space map. + */ +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + + atomic_t active_queues; + + struct blk_mq_bitmap_tags bitmap_tags; + struct blk_mq_bitmap_tags breserved_tags; + + struct request **rqs; + struct list_head page_list; + + int alloc_policy; +}; + + +extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy); +extern void blk_mq_free_tags(struct blk_mq_tags *tags); + +extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); +extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); +extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); +extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); +extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); +extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); +extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); + +enum { + BLK_MQ_TAG_CACHE_MIN = 1, + BLK_MQ_TAG_CACHE_MAX = 64, +}; + +enum { + BLK_MQ_TAG_FAIL = -1U, + BLK_MQ_TAG_MIN = BLK_MQ_TAG_CACHE_MIN, + BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, +}; + +extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); +extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); + +static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return false; + + return __blk_mq_tag_busy(hctx); +} + +static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return; + + __blk_mq_tag_idle(hctx); +} + +#endif diff --git a/block/blk-mq.c b/block/blk-mq.c new file mode 100644 index 000000000..594eea042 --- /dev/null +++ b/block/blk-mq.c @@ -0,0 +1,2286 @@ +/* + * Block multiqueue 
core code + * + * Copyright (C) 2013-2014 Jens Axboe + * Copyright (C) 2013-2014 Christoph Hellwig + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include "blk.h" +#include "blk-mq.h" +#include "blk-mq-tag.h" + +static DEFINE_MUTEX(all_q_mutex); +static LIST_HEAD(all_q_list); + +static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); + +/* + * Check if any of the ctx's have pending work in this hardware queue + */ +static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) +{ + unsigned int i; + + for (i = 0; i < hctx->ctx_map.size; i++) + if (hctx->ctx_map.map[i].word) + return true; + + return false; +} + +static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; +} + +#define CTX_TO_BIT(hctx, ctx) \ + ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1)) + +/* + * Mark this ctx as having pending work in this hardware queue + */ +static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) + set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); +} + +static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); +} + +static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp) +{ + while (true) { + int ret; + + if (percpu_ref_tryget_live(&q->mq_usage_counter)) + return 0; + + if (!(gfp & __GFP_WAIT)) + return -EBUSY; + + ret = wait_event_interruptible(q->mq_freeze_wq, + !q->mq_freeze_depth || blk_queue_dying(q)); + if (blk_queue_dying(q)) + return -ENODEV; + if (ret) + return ret; + } +} + +static void blk_mq_queue_exit(struct request_queue *q) +{ + percpu_ref_put(&q->mq_usage_counter); +} + +static void blk_mq_usage_counter_release(struct percpu_ref *ref) +{ + struct request_queue *q = + container_of(ref, struct request_queue, mq_usage_counter); + + wake_up_all(&q->mq_freeze_wq); +} + +void blk_mq_freeze_queue_start(struct request_queue *q) +{ + bool freeze; + + spin_lock_irq(q->queue_lock); + freeze = !q->mq_freeze_depth++; + spin_unlock_irq(q->queue_lock); + + if (freeze) { + percpu_ref_kill(&q->mq_usage_counter); + blk_mq_run_hw_queues(q, false); + } +} +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); + +static void blk_mq_freeze_queue_wait(struct request_queue *q) +{ + wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); +} + +/* + * Guarantee no request is in use, so we can change any data structure of + * the queue afterward. 
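 *
 * A sketch of the typical caller pattern, assuming a driver that needs
 * to rework queue structures while no request is in flight:
 *
 *	blk_mq_freeze_queue(q);
 *	...	(safely resize maps, update tag depth, etc.)
 *	blk_mq_unfreeze_queue(q);
 *
 * freeze_queue_start() kills q->mq_usage_counter, so new submitters
 * block in blk_mq_queue_enter() (or fail for non-blocking allocations),
 * and freeze_queue_wait() waits for the counter to reach zero, i.e. for
 * every outstanding request to complete.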
+ */ +void blk_mq_freeze_queue(struct request_queue *q) +{ + blk_mq_freeze_queue_start(q); + blk_mq_freeze_queue_wait(q); +} +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); + +void blk_mq_unfreeze_queue(struct request_queue *q) +{ + bool wake; + + spin_lock_irq(q->queue_lock); + wake = !--q->mq_freeze_depth; + WARN_ON_ONCE(q->mq_freeze_depth < 0); + spin_unlock_irq(q->queue_lock); + if (wake) { + percpu_ref_reinit(&q->mq_usage_counter); + wake_up_all(&q->mq_freeze_wq); + } +} +EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); + +void blk_mq_wake_waiters(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_wakeup_all(hctx->tags, true); + + /* + * If we are called because the queue has now been marked as + * dying, we need to ensure that processes currently waiting on + * the queue are notified as well. + */ + wake_up_all(&q->mq_freeze_wq); +} + +bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) +{ + return blk_mq_has_free_tags(hctx->tags); +} +EXPORT_SYMBOL(blk_mq_can_queue); + +static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, + struct request *rq, unsigned int rw_flags) +{ + if (blk_queue_io_stat(q)) + rw_flags |= REQ_IO_STAT; + + INIT_LIST_HEAD(&rq->queuelist); + /* csd/requeue_work/fifo_time is initialized before use */ + rq->q = q; + rq->mq_ctx = ctx; + rq->cmd_flags |= rw_flags; + /* do not touch atomic flags, it needs atomic ops against the timer */ + rq->cpu = -1; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + rq->rq_disk = NULL; + rq->part = NULL; + rq->start_time = jiffies; +#ifdef CONFIG_BLK_CGROUP + rq->rl = NULL; + set_start_time_ns(rq); + rq->io_start_time_ns = 0; +#endif + rq->nr_phys_segments = 0; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + rq->nr_integrity_segments = 0; +#endif + rq->special = NULL; + /* tag was already set */ + rq->errors = 0; + + rq->cmd = rq->__cmd; + + rq->extra_len = 0; + rq->sense_len = 0; + rq->resid_len = 0; + rq->sense = NULL; + + INIT_LIST_HEAD(&rq->timeout_list); + rq->timeout = 0; + + rq->end_io = NULL; + rq->end_io_data = NULL; + rq->next_rq = NULL; + + ctx->rq_dispatched[rw_is_sync(rw_flags)]++; +} + +static struct request * +__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) +{ + struct request *rq; + unsigned int tag; + + tag = blk_mq_get_tag(data); + if (tag != BLK_MQ_TAG_FAIL) { + rq = data->hctx->tags->rqs[tag]; + + if (blk_mq_tag_busy(data->hctx)) { + rq->cmd_flags = REQ_MQ_INFLIGHT; + atomic_inc(&data->hctx->nr_active); + } + + rq->tag = tag; + blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); + return rq; + } + + return NULL; +} + +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, + bool reserved) +{ + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; + struct request *rq; + struct blk_mq_alloc_data alloc_data; + int ret; + + ret = blk_mq_queue_enter(q, gfp); + if (ret) + return ERR_PTR(ret); + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT, + reserved, ctx, hctx); + + rq = __blk_mq_alloc_request(&alloc_data, rw); + if (!rq && (gfp & __GFP_WAIT)) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx, + hctx); + rq = __blk_mq_alloc_request(&alloc_data, rw); + ctx = alloc_data.ctx; + } + blk_mq_put_ctx(ctx); + if (!rq) { + blk_mq_queue_exit(q); + 
return ERR_PTR(-EWOULDBLOCK); + } + return rq; +} +EXPORT_SYMBOL(blk_mq_alloc_request); + +static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, struct request *rq) +{ + const int tag = rq->tag; + struct request_queue *q = rq->q; + + if (rq->cmd_flags & REQ_MQ_INFLIGHT) + atomic_dec(&hctx->nr_active); + rq->cmd_flags = 0; + + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + blk_mq_put_tag(hctx, tag, &ctx->last_tag); + blk_mq_queue_exit(q); +} + +void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + + ctx->rq_completed[rq_is_sync(rq)]++; + __blk_mq_free_request(hctx, ctx, rq); + +} +EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request); + +void blk_mq_free_request(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx; + struct request_queue *q = rq->q; + + hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); + blk_mq_free_hctx_request(hctx, rq); +} +EXPORT_SYMBOL_GPL(blk_mq_free_request); + +inline void __blk_mq_end_request(struct request *rq, int error) +{ + blk_account_io_done(rq); + + if (rq->end_io) { + rq->end_io(rq, error); + } else { + if (unlikely(blk_bidi_rq(rq))) + blk_mq_free_request(rq->next_rq); + blk_mq_free_request(rq); + } +} +EXPORT_SYMBOL(__blk_mq_end_request); + +void blk_mq_end_request(struct request *rq, int error) +{ + if (blk_update_request(rq, error, blk_rq_bytes(rq))) + BUG(); + __blk_mq_end_request(rq, error); +} +EXPORT_SYMBOL(blk_mq_end_request); + +static void __blk_mq_complete_request_remote(void *data) +{ + struct request *rq = data; + + rq->q->softirq_done_fn(rq); +} + +static void blk_mq_ipi_complete_request(struct request *rq) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + bool shared = false; + int cpu; + + if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { + rq->q->softirq_done_fn(rq); + return; + } + + cpu = get_cpu(); + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { + rq->csd.func = __blk_mq_complete_request_remote; + rq->csd.info = rq; + rq->csd.flags = 0; + smp_call_function_single_async(ctx->cpu, &rq->csd); + } else { + rq->q->softirq_done_fn(rq); + } + put_cpu(); +} + +void __blk_mq_complete_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + if (!q->softirq_done_fn) + blk_mq_end_request(rq, rq->errors); + else + blk_mq_ipi_complete_request(rq); +} + +/** + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed + * + * Description: + * Ends all I/O on a request. It does not handle partial completions. + * The actual completion happens out-of-order, through a IPI handler. + **/ +void blk_mq_complete_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + if (unlikely(blk_should_fake_timeout(q))) + return; + if (!blk_mark_rq_complete(rq)) + __blk_mq_complete_request(rq); +} +EXPORT_SYMBOL(blk_mq_complete_request); + +int blk_mq_request_started(struct request *rq) +{ + return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); +} +EXPORT_SYMBOL_GPL(blk_mq_request_started); + +void blk_mq_start_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + trace_block_rq_issue(q, rq); + + rq->resid_len = blk_rq_bytes(rq); + if (unlikely(blk_bidi_rq(rq))) + rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); + + blk_add_timer(rq); + + /* + * Ensure that ->deadline is visible before set the started + * flag and clear the completed flag. 
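 *
 * In other words: the barrier below makes sure that once the timeout
 * path in blk_mq_check_expired() observes REQ_ATOM_STARTED, it also
 * observes the rq->deadline that blk_add_timer() just wrote, so a
 * freshly started request is never timed out against a stale deadline.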
+ */ + smp_mb__before_atomic(); + + /* + * Mark us as started and clear complete. Complete might have been + * set if requeue raced with timeout, which then marked it as + * complete. So be sure to clear complete again when we start + * the request, otherwise we'll ignore the completion event. + */ + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); + + if (q->dma_drain_size && blk_rq_bytes(rq)) { + /* + * Make sure space for the drain appears. We know we can do + * this because max_hw_segments has been adjusted to be one + * fewer than the device can handle. + */ + rq->nr_phys_segments++; + } +} +EXPORT_SYMBOL(blk_mq_start_request); + +static void __blk_mq_requeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + trace_block_rq_requeue(q, rq); + + if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { + if (q->dma_drain_size && blk_rq_bytes(rq)) + rq->nr_phys_segments--; + } +} + +void blk_mq_requeue_request(struct request *rq) +{ + __blk_mq_requeue_request(rq); + + BUG_ON(blk_queued_rq(rq)); + blk_mq_add_to_requeue_list(rq, true); +} +EXPORT_SYMBOL(blk_mq_requeue_request); + +static void blk_mq_requeue_work(struct work_struct *work) +{ + struct request_queue *q = + container_of(work, struct request_queue, requeue_work); + LIST_HEAD(rq_list); + struct request *rq, *next; + unsigned long flags; + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + list_for_each_entry_safe(rq, next, &rq_list, queuelist) { + if (!(rq->cmd_flags & REQ_SOFTBARRIER)) + continue; + + rq->cmd_flags &= ~REQ_SOFTBARRIER; + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, true, false, false); + } + + while (!list_empty(&rq_list)) { + rq = list_entry(rq_list.next, struct request, queuelist); + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, false, false, false); + } + + /* + * Use the start variant of queue running here, so that running + * the requeue work will kick stopped queues. + */ + blk_mq_start_hw_queues(q); +} + +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) +{ + struct request_queue *q = rq->q; + unsigned long flags; + + /* + * We abuse this flag that is otherwise used by the I/O scheduler to + * request head insertation from the workqueue. 
+ */ + BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); + + spin_lock_irqsave(&q->requeue_lock, flags); + if (at_head) { + rq->cmd_flags |= REQ_SOFTBARRIER; + list_add(&rq->queuelist, &q->requeue_list); + } else { + list_add_tail(&rq->queuelist, &q->requeue_list); + } + spin_unlock_irqrestore(&q->requeue_lock, flags); +} +EXPORT_SYMBOL(blk_mq_add_to_requeue_list); + +void blk_mq_cancel_requeue_work(struct request_queue *q) +{ + cancel_work_sync(&q->requeue_work); +} +EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); + +void blk_mq_kick_requeue_list(struct request_queue *q) +{ + kblockd_schedule_work(&q->requeue_work); +} +EXPORT_SYMBOL(blk_mq_kick_requeue_list); + +void blk_mq_abort_requeue_list(struct request_queue *q) +{ + unsigned long flags; + LIST_HEAD(rq_list); + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + while (!list_empty(&rq_list)) { + struct request *rq; + + rq = list_first_entry(&rq_list, struct request, queuelist); + list_del_init(&rq->queuelist); + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + } +} +EXPORT_SYMBOL(blk_mq_abort_requeue_list); + +static inline bool is_flush_request(struct request *rq, + struct blk_flush_queue *fq, unsigned int tag) +{ + return ((rq->cmd_flags & REQ_FLUSH_SEQ) && + fq->flush_rq->tag == tag); +} + +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) +{ + struct request *rq = tags->rqs[tag]; + /* mq_ctx of flush rq is always cloned from the corresponding req */ + struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx); + + if (!is_flush_request(rq, fq, tag)) + return rq; + + return fq->flush_rq; +} +EXPORT_SYMBOL(blk_mq_tag_to_rq); + +struct blk_mq_timeout_data { + unsigned long next; + unsigned int next_set; +}; + +void blk_mq_rq_timed_out(struct request *req, bool reserved) +{ + struct blk_mq_ops *ops = req->q->mq_ops; + enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER; + + /* + * We know that complete is set at this point. If STARTED isn't set + * anymore, then the request isn't active and the "timeout" should + * just be ignored. This can happen due to the bitflag ordering. + * Timeout first checks if STARTED is set, and if it is, assumes + * the request is active. But if we race with completion, then + * we both flags will get cleared. So check here again, and ignore + * a timeout event with a request that isn't active. + */ + if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags)) + return; + + if (ops->timeout) + ret = ops->timeout(req, reserved); + + switch (ret) { + case BLK_EH_HANDLED: + __blk_mq_complete_request(req); + break; + case BLK_EH_RESET_TIMER: + blk_add_timer(req); + blk_clear_rq_complete(req); + break; + case BLK_EH_NOT_HANDLED: + break; + default: + printk(KERN_ERR "block: bad eh return: %d\n", ret); + break; + } +} + +static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + struct blk_mq_timeout_data *data = priv; + + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { + /* + * If a request wasn't started before the queue was + * marked dying, kill it here or it'll go unnoticed. 
+ */ + if (unlikely(blk_queue_dying(rq->q))) { + rq->errors = -EIO; + blk_mq_complete_request(rq); + } + return; + } + if (rq->cmd_flags & REQ_NO_TIMEOUT) + return; + + if (time_after_eq(jiffies, rq->deadline)) { + if (!blk_mark_rq_complete(rq)) + blk_mq_rq_timed_out(rq, reserved); + } else if (!data->next_set || time_after(data->next, rq->deadline)) { + data->next = rq->deadline; + data->next_set = 1; + } +} + +static void blk_mq_rq_timer(unsigned long priv) +{ + struct request_queue *q = (struct request_queue *)priv; + struct blk_mq_timeout_data data = { + .next = 0, + .next_set = 0, + }; + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + /* + * If not software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!blk_mq_hw_queue_mapped(hctx)) + continue; + + blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); + } + + if (data.next_set) { + data.next = blk_rq_timeout(round_jiffies_up(data.next)); + mod_timer(&q->timeout, data.next); + } else { + queue_for_each_hw_ctx(q, hctx, i) { + /* the hctx may be unmapped, so check it here */ + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_idle(hctx); + } + } +} + +/* + * Reverse check our software queue for entries that we could potentially + * merge with. Currently includes a hand-wavy stop count of 8, to not spend + * too much time checking for merges. + */ +static bool blk_mq_attempt_merge(struct request_queue *q, + struct blk_mq_ctx *ctx, struct bio *bio) +{ + struct request *rq; + int checked = 8; + + list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) { + int el_ret; + + if (!checked--) + break; + + if (!blk_rq_merge_ok(rq, bio)) + continue; + + el_ret = blk_try_merge(rq, bio); + if (el_ret == ELEVATOR_BACK_MERGE) { + if (bio_attempt_back_merge(q, rq, bio)) { + ctx->rq_merged++; + return true; + } + break; + } else if (el_ret == ELEVATOR_FRONT_MERGE) { + if (bio_attempt_front_merge(q, rq, bio)) { + ctx->rq_merged++; + return true; + } + break; + } + } + + return false; +} + +/* + * Process software queues that have been marked busy, splicing them + * to the for-dispatch + */ +static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) +{ + struct blk_mq_ctx *ctx; + int i; + + for (i = 0; i < hctx->ctx_map.size; i++) { + struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; + unsigned int off, bit; + + if (!bm->word) + continue; + + bit = 0; + off = i * hctx->ctx_map.bits_per_word; + do { + bit = find_next_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + break; + + ctx = hctx->ctxs[bit + off]; + clear_bit(bit, &bm->word); + spin_lock(&ctx->lock); + list_splice_tail_init(&ctx->rq_list, list); + spin_unlock(&ctx->lock); + + bit++; + } while (1); + } +} + +/* + * Run this hardware queue, pulling any software queues mapped to it in. + * Note that this function currently has various problems around ordering + * of IO. In particular, we'd like FIFO behaviour on handling existing + * items on the hctx->dispatch list. Ignore that for now. + */ +static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct request *rq; + LIST_HEAD(rq_list); + LIST_HEAD(driver_list); + struct list_head *dptr; + int queued; + + WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); + + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) + return; + + hctx->run++; + + /* + * Touch any software queue that has pending entries. 
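 *
 * "Pending" here means the per-hctx bitmap maintained by
 * blk_mq_hctx_mark_pending(): with, say, ctx_map.bits_per_word = 32, a
 * software queue with index_hw = 37 sets bit 37 & 31 = 5 in
 * ctx_map.map[37 / 32] = map[1]. flush_busy_ctxs() walks the set bits
 * and splices each ctx->rq_list onto rq_list.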
+ */ + flush_busy_ctxs(hctx, &rq_list); + + /* + * If we have previous entries on our dispatch list, grab them + * and stuff them at the front for more fair dispatch. + */ + if (!list_empty_careful(&hctx->dispatch)) { + spin_lock(&hctx->lock); + if (!list_empty(&hctx->dispatch)) + list_splice_init(&hctx->dispatch, &rq_list); + spin_unlock(&hctx->lock); + } + + /* + * Start off with dptr being NULL, so we start the first request + * immediately, even if we have more pending. + */ + dptr = NULL; + + /* + * Now process all the entries, sending them to the driver. + */ + queued = 0; + while (!list_empty(&rq_list)) { + struct blk_mq_queue_data bd; + int ret; + + rq = list_first_entry(&rq_list, struct request, queuelist); + list_del_init(&rq->queuelist); + + bd.rq = rq; + bd.list = dptr; + bd.last = list_empty(&rq_list); + + ret = q->mq_ops->queue_rq(hctx, &bd); + switch (ret) { + case BLK_MQ_RQ_QUEUE_OK: + queued++; + continue; + case BLK_MQ_RQ_QUEUE_BUSY: + list_add(&rq->queuelist, &rq_list); + __blk_mq_requeue_request(rq); + break; + default: + pr_err("blk-mq: bad return on queue: %d\n", ret); + case BLK_MQ_RQ_QUEUE_ERROR: + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + break; + } + + if (ret == BLK_MQ_RQ_QUEUE_BUSY) + break; + + /* + * We've done the first request. If we have more than 1 + * left in the list, set dptr to defer issue. + */ + if (!dptr && rq_list.next != rq_list.prev) + dptr = &driver_list; + } + + if (!queued) + hctx->dispatched[0]++; + else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1))) + hctx->dispatched[ilog2(queued) + 1]++; + + /* + * Any items that need requeuing? Stuff them into hctx->dispatch, + * that is where we will continue on next queue run. + */ + if (!list_empty(&rq_list)) { + spin_lock(&hctx->lock); + list_splice(&rq_list, &hctx->dispatch); + spin_unlock(&hctx->lock); + /* + * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but + * it's possible the queue is stopped and restarted again + * before this. Queue restart will dispatch requests. And since + * requests in rq_list aren't added into hctx->dispatch yet, + * the requests in rq_list might get lost. + * + * blk_mq_run_hw_queue() already checks the STOPPED bit + **/ + blk_mq_run_hw_queue(hctx, true); + } +} + +/* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement. + * For now we just round-robin here, switching for every + * BLK_MQ_CPU_WORK_BATCH queued items. 
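 *
 * Example, assuming BLK_MQ_CPU_WORK_BATCH is 8 as declared in blk-mq.h:
 * for an hctx whose cpumask contains CPUs 2 and 6, roughly the first 8
 * queue runs are scheduled on CPU 2, the next 8 on CPU 6, then back to
 * CPU 2, and so on. With a single hardware queue the function below
 * short-circuits to WORK_CPU_UNBOUND instead.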
+ */ +static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) +{ + if (hctx->queue->nr_hw_queues == 1) + return WORK_CPU_UNBOUND; + + if (--hctx->next_cpu_batch <= 0) { + int cpu = hctx->next_cpu, next_cpu; + + next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(hctx->cpumask); + + hctx->next_cpu = next_cpu; + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + + return cpu; + } + + return hctx->next_cpu; +} + +void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +{ + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) || + !blk_mq_hw_queue_mapped(hctx))) + return; + + if (!async) { + int cpu = get_cpu(); + if (cpumask_test_cpu(cpu, hctx->cpumask)) { + __blk_mq_run_hw_queue(hctx); + put_cpu(); + return; + } + + put_cpu(); + } + + kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->run_work, 0); +} + +void blk_mq_run_hw_queues(struct request_queue *q, bool async) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if ((!blk_mq_hctx_has_pending(hctx) && + list_empty_careful(&hctx->dispatch)) || + test_bit(BLK_MQ_S_STOPPED, &hctx->state)) + continue; + + blk_mq_run_hw_queue(hctx, async); + } +} +EXPORT_SYMBOL(blk_mq_run_hw_queues); + +void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) +{ + cancel_delayed_work(&hctx->run_work); + cancel_delayed_work(&hctx->delay_work); + set_bit(BLK_MQ_S_STOPPED, &hctx->state); +} +EXPORT_SYMBOL(blk_mq_stop_hw_queue); + +void blk_mq_stop_hw_queues(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_stop_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_stop_hw_queues); + +void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) +{ + clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + + blk_mq_run_hw_queue(hctx, false); +} +EXPORT_SYMBOL(blk_mq_start_hw_queue); + +void blk_mq_start_hw_queues(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_start_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_start_hw_queues); + +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state)) + continue; + + clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + blk_mq_run_hw_queue(hctx, async); + } +} +EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); + +static void blk_mq_run_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); + + __blk_mq_run_hw_queue(hctx); +} + +static void blk_mq_delay_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); + + if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) + __blk_mq_run_hw_queue(hctx); +} + +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + if (unlikely(!blk_mq_hw_queue_mapped(hctx))) + return; + + kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->delay_work, msecs_to_jiffies(msecs)); +} +EXPORT_SYMBOL(blk_mq_delay_queue); + +static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, + struct request *rq, bool at_head) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + + trace_block_rq_insert(hctx->queue, rq); + + if (at_head) + list_add(&rq->queuelist, &ctx->rq_list); + else + list_add_tail(&rq->queuelist, &ctx->rq_list); + + blk_mq_hctx_mark_pending(hctx, ctx); +} + 
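/*
 * The helper above only parks the request on its software queue and marks
 * that ctx as pending in the hctx bitmap; nothing reaches the driver until
 * a queue run pulls it in via flush_busy_ctxs(). A caller that wants the
 * request dispatched promptly therefore passes run_queue = true, e.g.
 *
 *	blk_mq_insert_request(rq, false, true, false);
 *
 * which queues rq at the tail and kicks the hardware queue synchronously.
 */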
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, + bool async) +{ + struct request_queue *q = rq->q; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; + + current_ctx = blk_mq_get_ctx(q); + if (!cpu_online(ctx->cpu)) + rq->mq_ctx = ctx = current_ctx; + + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + spin_lock(&ctx->lock); + __blk_mq_insert_request(hctx, rq, at_head); + spin_unlock(&ctx->lock); + + if (run_queue) + blk_mq_run_hw_queue(hctx, async); + + blk_mq_put_ctx(current_ctx); +} + +static void blk_mq_insert_requests(struct request_queue *q, + struct blk_mq_ctx *ctx, + struct list_head *list, + int depth, + bool from_schedule) + +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *current_ctx; + + trace_block_unplug(q, depth, !from_schedule); + + current_ctx = blk_mq_get_ctx(q); + + if (!cpu_online(ctx->cpu)) + ctx = current_ctx; + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + /* + * preemption doesn't flush plug list, so it's possible ctx->cpu is + * offline now + */ + spin_lock(&ctx->lock); + while (!list_empty(list)) { + struct request *rq; + + rq = list_first_entry(list, struct request, queuelist); + list_del_init(&rq->queuelist); + rq->mq_ctx = ctx; + __blk_mq_insert_request(hctx, rq, false); + } + spin_unlock(&ctx->lock); + + blk_mq_run_hw_queue(hctx, from_schedule); + blk_mq_put_ctx(current_ctx); +} + +static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct request *rqa = container_of(a, struct request, queuelist); + struct request *rqb = container_of(b, struct request, queuelist); + + return !(rqa->mq_ctx < rqb->mq_ctx || + (rqa->mq_ctx == rqb->mq_ctx && + blk_rq_pos(rqa) < blk_rq_pos(rqb))); +} + +void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) +{ + struct blk_mq_ctx *this_ctx; + struct request_queue *this_q; + struct request *rq; + LIST_HEAD(list); + LIST_HEAD(ctx_list); + unsigned int depth; + + list_splice_init(&plug->mq_list, &list); + + list_sort(NULL, &list, plug_ctx_cmp); + + this_q = NULL; + this_ctx = NULL; + depth = 0; + + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); + BUG_ON(!rq->q); + if (rq->mq_ctx != this_ctx) { + if (this_ctx) { + blk_mq_insert_requests(this_q, this_ctx, + &ctx_list, depth, + from_schedule); + } + + this_ctx = rq->mq_ctx; + this_q = rq->q; + depth = 0; + } + + depth++; + list_add_tail(&rq->queuelist, &ctx_list); + } + + /* + * If 'this_ctx' is set, we know we have entries to complete + * on 'ctx_list'. Do those. 
+ */ + if (this_ctx) { + blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth, + from_schedule); + } +} + +static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) +{ + init_request_from_bio(rq, bio); + + if (blk_do_io_stat(rq)) + blk_account_io_start(rq, 1); +} + +static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) +{ + return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && + !blk_queue_nomerges(hctx->queue); +} + +static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, + struct request *rq, struct bio *bio) +{ + if (!hctx_allow_merges(hctx)) { + blk_mq_bio_to_request(rq, bio); + spin_lock(&ctx->lock); +insert_rq: + __blk_mq_insert_request(hctx, rq, false); + spin_unlock(&ctx->lock); + return false; + } else { + struct request_queue *q = hctx->queue; + + spin_lock(&ctx->lock); + if (!blk_mq_attempt_merge(q, ctx, bio)) { + blk_mq_bio_to_request(rq, bio); + goto insert_rq; + } + + spin_unlock(&ctx->lock); + __blk_mq_free_request(hctx, ctx, rq); + return true; + } +} + +struct blk_map_ctx { + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; +}; + +static struct request *blk_mq_map_request(struct request_queue *q, + struct bio *bio, + struct blk_map_ctx *data) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + struct request *rq; + int rw = bio_data_dir(bio); + struct blk_mq_alloc_data alloc_data; + + if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) { + bio_endio(bio, -EIO); + return NULL; + } + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + if (rw_is_sync(bio->bi_rw)) + rw |= REQ_SYNC; + + trace_block_getrq(q, bio, rw); + blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, + hctx); + rq = __blk_mq_alloc_request(&alloc_data, rw); + if (unlikely(!rq)) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + trace_block_sleeprq(q, bio, rw); + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + blk_mq_set_alloc_data(&alloc_data, q, + __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx); + rq = __blk_mq_alloc_request(&alloc_data, rw); + ctx = alloc_data.ctx; + hctx = alloc_data.hctx; + } + + hctx->queued++; + data->hctx = hctx; + data->ctx = ctx; + return rq; +} + +/* + * Multiple hardware queue variant. This will not use per-process plugs, + * but will attempt to bypass the hctx queueing if we can go straight to + * hardware for SYNC IO. + */ +static void blk_mq_make_request(struct request_queue *q, struct bio *bio) +{ + const int is_sync = rw_is_sync(bio->bi_rw); + const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); + struct blk_map_ctx data; + struct request *rq; + + blk_queue_bounce(q, &bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + + rq = blk_mq_map_request(q, bio, &data); + if (unlikely(!rq)) + return; + + if (unlikely(is_flush_fua)) { + blk_mq_bio_to_request(rq, bio); + blk_insert_flush(rq); + goto run_queue; + } + + /* + * If the driver supports defer issued based on 'last', then + * queue it up like normal since we can potentially save some + * CPU this way. + */ + if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) { + struct blk_mq_queue_data bd = { + .rq = rq, + .list = NULL, + .last = 1 + }; + int ret; + + blk_mq_bio_to_request(rq, bio); + + /* + * For OK queue, we are done. For error, kill it. 
Any other + * error (busy), just add it to our list as we previously + * would have done + */ + ret = q->mq_ops->queue_rq(data.hctx, &bd); + if (ret == BLK_MQ_RQ_QUEUE_OK) + goto done; + else { + __blk_mq_requeue_request(rq); + + if (ret == BLK_MQ_RQ_QUEUE_ERROR) { + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + goto done; + } + } + } + + if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { + /* + * For a SYNC request, send it to the hardware immediately. For + * an ASYNC request, just ensure that we run it later on. The + * latter allows for merging opportunities and more efficient + * dispatching. + */ +run_queue: + blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); + } +done: + blk_mq_put_ctx(data.ctx); +} + +/* + * Single hardware queue variant. This will attempt to use any per-process + * plug for merging and IO deferral. + */ +static void blk_sq_make_request(struct request_queue *q, struct bio *bio) +{ + const int is_sync = rw_is_sync(bio->bi_rw); + const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); + unsigned int use_plug, request_count = 0; + struct blk_map_ctx data; + struct request *rq; + + /* + * If we have multiple hardware queues, just go directly to + * one of those for sync IO. + */ + use_plug = !is_flush_fua && !is_sync; + + blk_queue_bounce(q, &bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + + if (use_plug && !blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) + return; + + rq = blk_mq_map_request(q, bio, &data); + if (unlikely(!rq)) + return; + + if (unlikely(is_flush_fua)) { + blk_mq_bio_to_request(rq, bio); + blk_insert_flush(rq); + goto run_queue; + } + + /* + * A task plug currently exists. Since this is completely lockless, + * utilize that to temporarily store requests until the task is + * either done or scheduled away. + */ + if (use_plug) { + struct blk_plug *plug = current->plug; + + if (plug) { + blk_mq_bio_to_request(rq, bio); + if (list_empty(&plug->mq_list)) + trace_block_plug(q); + else if (request_count >= BLK_MAX_REQUEST_COUNT) { + blk_flush_plug_list(plug, false); + trace_block_plug(q); + } + list_add_tail(&rq->queuelist, &plug->mq_list); + blk_mq_put_ctx(data.ctx); + return; + } + } + + if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { + /* + * For a SYNC request, send it to the hardware immediately. For + * an ASYNC request, just ensure that we run it later on. The + * latter allows for merging opportunities and more efficient + * dispatching. + */ +run_queue: + blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); + } + + blk_mq_put_ctx(data.ctx); +} + +/* + * Default mapping to a software queue, since we use one per CPU. 
+ */ +struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} +EXPORT_SYMBOL(blk_mq_map_queue); + +static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, unsigned int hctx_idx) +{ + struct page *page; + + if (tags->rqs && set->ops->exit_request) { + int i; + + for (i = 0; i < tags->nr_tags; i++) { + if (!tags->rqs[i]) + continue; + set->ops->exit_request(set->driver_data, tags->rqs[i], + hctx_idx, i); + tags->rqs[i] = NULL; + } + } + + while (!list_empty(&tags->page_list)) { + page = list_first_entry(&tags->page_list, struct page, lru); + list_del_init(&page->lru); + __free_pages(page, page->private); + } + + kfree(tags->rqs); + + blk_mq_free_tags(tags); +} + +static size_t order_to_size(unsigned int order) +{ + return (size_t)PAGE_SIZE << order; +} + +static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, + unsigned int hctx_idx) +{ + struct blk_mq_tags *tags; + unsigned int i, j, entries_per_page, max_order = 4; + size_t rq_size, left; + + tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags, + set->numa_node, + BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); + if (!tags) + return NULL; + + INIT_LIST_HEAD(&tags->page_list); + + tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, + set->numa_node); + if (!tags->rqs) { + blk_mq_free_tags(tags); + return NULL; + } + + /* + * rq_size is the size of the request plus driver payload, rounded + * to the cacheline size + */ + rq_size = round_up(sizeof(struct request) + set->cmd_size, + cache_line_size()); + left = rq_size * set->queue_depth; + + for (i = 0; i < set->queue_depth; ) { + int this_order = max_order; + struct page *page; + int to_do; + void *p; + + while (left < order_to_size(this_order - 1) && this_order) + this_order--; + + do { + page = alloc_pages_node(set->numa_node, + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, + this_order); + if (page) + break; + if (!this_order--) + break; + if (order_to_size(this_order) < rq_size) + break; + } while (1); + + if (!page) + goto fail; + + page->private = this_order; + list_add_tail(&page->lru, &tags->page_list); + + p = page_address(page); + entries_per_page = order_to_size(this_order) / rq_size; + to_do = min(entries_per_page, set->queue_depth - i); + left -= to_do * rq_size; + for (j = 0; j < to_do; j++) { + tags->rqs[i] = p; + if (set->ops->init_request) { + if (set->ops->init_request(set->driver_data, + tags->rqs[i], hctx_idx, i, + set->numa_node)) { + tags->rqs[i] = NULL; + goto fail; + } + } + + p += rq_size; + i++; + } + } + + return tags; + +fail: + blk_mq_free_rq_map(set, tags, hctx_idx); + return NULL; +} + +static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap) +{ + kfree(bitmap->map); +} + +static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) +{ + unsigned int bpw = 8, total, num_maps, i; + + bitmap->bits_per_word = bpw; + + num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; + bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap), + GFP_KERNEL, node); + if (!bitmap->map) + return -ENOMEM; + + total = nr_cpu_ids; + for (i = 0; i < num_maps; i++) { + bitmap->map[i].depth = min(total, bitmap->bits_per_word); + total -= bitmap->map[i].depth; + } + + return 0; +} + +static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + LIST_HEAD(tmp); + + /* + * Move ctx entries to new CPU, 
if this one is going away. + */ + ctx = __blk_mq_get_ctx(q, cpu); + + spin_lock(&ctx->lock); + if (!list_empty(&ctx->rq_list)) { + list_splice_init(&ctx->rq_list, &tmp); + blk_mq_hctx_clear_pending(hctx, ctx); + } + spin_unlock(&ctx->lock); + + if (list_empty(&tmp)) + return NOTIFY_OK; + + ctx = blk_mq_get_ctx(q); + spin_lock(&ctx->lock); + + while (!list_empty(&tmp)) { + struct request *rq; + + rq = list_first_entry(&tmp, struct request, queuelist); + rq->mq_ctx = ctx; + list_move_tail(&rq->queuelist, &ctx->rq_list); + } + + hctx = q->mq_ops->map_queue(q, ctx->cpu); + blk_mq_hctx_mark_pending(hctx, ctx); + + spin_unlock(&ctx->lock); + + blk_mq_run_hw_queue(hctx, true); + blk_mq_put_ctx(ctx); + return NOTIFY_OK; +} + +static int blk_mq_hctx_notify(void *data, unsigned long action, + unsigned int cpu) +{ + struct blk_mq_hw_ctx *hctx = data; + + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) + return blk_mq_hctx_cpu_offline(hctx, cpu); + + /* + * In case of CPU online, tags may be reallocated + * in blk_mq_map_swqueue() after mapping is updated. + */ + + return NOTIFY_OK; +} + +/* hctx->ctxs will be freed in queue's release handler */ +static void blk_mq_exit_hctx(struct request_queue *q, + struct blk_mq_tag_set *set, + struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) +{ + unsigned flush_start_tag = set->queue_depth; + + blk_mq_tag_idle(hctx); + + if (set->ops->exit_request) + set->ops->exit_request(set->driver_data, + hctx->fq->flush_rq, hctx_idx, + flush_start_tag + hctx_idx); + + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, hctx_idx); + + blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); + blk_free_flush_queue(hctx->fq); + blk_mq_free_bitmap(&hctx->ctx_map); +} + +static void blk_mq_exit_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set, int nr_queue) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if (i == nr_queue) + break; + blk_mq_exit_hctx(q, set, hctx, i); + } +} + +static void blk_mq_free_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) + free_cpumask_var(hctx->cpumask); +} + +static int blk_mq_init_hctx(struct request_queue *q, + struct blk_mq_tag_set *set, + struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) +{ + int node; + unsigned flush_start_tag = set->queue_depth; + + node = hctx->numa_node; + if (node == NUMA_NO_NODE) + node = hctx->numa_node = set->numa_node; + + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); + spin_lock_init(&hctx->lock); + INIT_LIST_HEAD(&hctx->dispatch); + hctx->queue = q; + hctx->queue_num = hctx_idx; + hctx->flags = set->flags; + + blk_mq_init_cpu_notifier(&hctx->cpu_notifier, + blk_mq_hctx_notify, hctx); + blk_mq_register_cpu_notifier(&hctx->cpu_notifier); + + hctx->tags = set->tags[hctx_idx]; + + /* + * Allocate space for all possible cpus to avoid allocation at + * runtime + */ + hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), + GFP_KERNEL, node); + if (!hctx->ctxs) + goto unregister_cpu_notifier; + + if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) + goto free_ctxs; + + hctx->nr_ctx = 0; + + if (set->ops->init_hctx && + set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) + goto free_bitmap; + + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); + if (!hctx->fq) + goto exit_hctx; + + if (set->ops->init_request && + set->ops->init_request(set->driver_data, + hctx->fq->flush_rq, 
hctx_idx, + flush_start_tag + hctx_idx, node)) + goto free_fq; + + return 0; + + free_fq: + kfree(hctx->fq); + exit_hctx: + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, hctx_idx); + free_bitmap: + blk_mq_free_bitmap(&hctx->ctx_map); + free_ctxs: + kfree(hctx->ctxs); + unregister_cpu_notifier: + blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); + + return -1; +} + +static int blk_mq_init_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + /* + * Initialize hardware queues + */ + queue_for_each_hw_ctx(q, hctx, i) { + if (blk_mq_init_hctx(q, set, hctx, i)) + break; + } + + if (i == q->nr_hw_queues) + return 0; + + /* + * Init failed + */ + blk_mq_exit_hw_queues(q, set, i); + + return 1; +} + +static void blk_mq_init_cpu_queues(struct request_queue *q, + unsigned int nr_hw_queues) +{ + unsigned int i; + + for_each_possible_cpu(i) { + struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); + struct blk_mq_hw_ctx *hctx; + + memset(__ctx, 0, sizeof(*__ctx)); + __ctx->cpu = i; + spin_lock_init(&__ctx->lock); + INIT_LIST_HEAD(&__ctx->rq_list); + __ctx->queue = q; + + /* If the cpu isn't online, the cpu is mapped to first hctx */ + if (!cpu_online(i)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + + /* + * Set local node, IFF we have more than one hw queue. If + * not, we remain on the home node of the device + */ + if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) + hctx->numa_node = cpu_to_node(i); + } +} + +static void blk_mq_map_swqueue(struct request_queue *q) +{ + unsigned int i; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + struct blk_mq_tag_set *set = q->tag_set; + + queue_for_each_hw_ctx(q, hctx, i) { + cpumask_clear(hctx->cpumask); + hctx->nr_ctx = 0; + } + + /* + * Map software to hardware queues + */ + queue_for_each_ctx(q, ctx, i) { + /* If the cpu isn't online, the cpu is mapped to first hctx */ + if (!cpu_online(i)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); + ctx->index_hw = hctx->nr_ctx; + hctx->ctxs[hctx->nr_ctx++] = ctx; + } + + queue_for_each_hw_ctx(q, hctx, i) { + struct blk_mq_ctxmap *map = &hctx->ctx_map; + + /* + * If no software queues are mapped to this hardware queue, + * disable it and free the request entries. + */ + if (!hctx->nr_ctx) { + if (set->tags[i]) { + blk_mq_free_rq_map(set, set->tags[i], i); + set->tags[i] = NULL; + } + hctx->tags = NULL; + continue; + } + + /* unmapped hw queue can be remapped after CPU topo changed */ + if (!set->tags[i]) + set->tags[i] = blk_mq_init_rq_map(set, i); + hctx->tags = set->tags[i]; + WARN_ON(!hctx->tags); + + /* + * Set the map size to the number of mapped software queues. + * This is more accurate and more efficient than looping + * over all possibly mapped software queues. 
+ */ + map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word); + + /* + * Initialize batch roundrobin counts + */ + hctx->next_cpu = cpumask_first(hctx->cpumask); + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } +} + +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + bool shared; + int i; + + if (set->tag_list.next == set->tag_list.prev) + shared = false; + else + shared = true; + + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_freeze_queue(q); + + queue_for_each_hw_ctx(q, hctx, i) { + if (shared) + hctx->flags |= BLK_MQ_F_TAG_SHARED; + else + hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } + blk_mq_unfreeze_queue(q); + } +} + +static void blk_mq_del_queue_tag_set(struct request_queue *q) +{ + struct blk_mq_tag_set *set = q->tag_set; + + mutex_lock(&set->tag_list_lock); + list_del_init(&q->tag_set_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); +} + +static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, + struct request_queue *q) +{ + q->tag_set = set; + + mutex_lock(&set->tag_list_lock); + list_add_tail(&q->tag_set_list, &set->tag_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); +} + +/* + * It is the actual release handler for mq, but we do it from + * request queue's release handler for avoiding use-after-free + * and headache because q->mq_kobj shouldn't have been introduced, + * but we can't group ctx/kctx kobj without it. + */ +void blk_mq_release(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + /* hctx kobj stays in hctx */ + queue_for_each_hw_ctx(q, hctx, i) { + if (!hctx) + continue; + kfree(hctx->ctxs); + kfree(hctx); + } + + kfree(q->queue_hw_ctx); + + /* ctx kobj stays in queue_ctx */ + free_percpu(q->queue_ctx); +} + +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) +{ + struct request_queue *uninit_q, *q; + + uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); + if (!uninit_q) + return ERR_PTR(-ENOMEM); + + q = blk_mq_init_allocated_queue(set, uninit_q); + if (IS_ERR(q)) + blk_cleanup_queue(uninit_q); + + return q; +} +EXPORT_SYMBOL(blk_mq_init_queue); + +struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, + struct request_queue *q) +{ + struct blk_mq_hw_ctx **hctxs; + struct blk_mq_ctx __percpu *ctx; + unsigned int *map; + int i; + + ctx = alloc_percpu(struct blk_mq_ctx); + if (!ctx) + return ERR_PTR(-ENOMEM); + + hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, + set->numa_node); + + if (!hctxs) + goto err_percpu; + + map = blk_mq_make_queue_map(set); + if (!map) + goto err_map; + + for (i = 0; i < set->nr_hw_queues; i++) { + int node = blk_mq_hw_queue_to_node(map, i); + + hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), + GFP_KERNEL, node); + if (!hctxs[i]) + goto err_hctxs; + + if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL, + node)) + goto err_hctxs; + + atomic_set(&hctxs[i]->nr_active, 0); + hctxs[i]->numa_node = node; + hctxs[i]->queue_num = i; + } + + /* + * Init percpu_ref in atomic mode so that it's faster to shutdown. + * See blk_register_queue() for details. + */ + if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, + PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) + goto err_hctxs; + + setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); + blk_queue_rq_timeout(q, set->timeout ? 
set->timeout : 30000); + + q->nr_queues = nr_cpu_ids; + q->nr_hw_queues = set->nr_hw_queues; + q->mq_map = map; + + q->queue_ctx = ctx; + q->queue_hw_ctx = hctxs; + + q->mq_ops = set->ops; + q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; + + if (!(set->flags & BLK_MQ_F_SG_MERGE)) + q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; + + q->sg_reserved_size = INT_MAX; + + INIT_WORK(&q->requeue_work, blk_mq_requeue_work); + INIT_LIST_HEAD(&q->requeue_list); + spin_lock_init(&q->requeue_lock); + + if (q->nr_hw_queues > 1) + blk_queue_make_request(q, blk_mq_make_request); + else + blk_queue_make_request(q, blk_sq_make_request); + + /* + * Do this after blk_queue_make_request() overrides it... + */ + q->nr_requests = set->queue_depth; + + if (set->ops->complete) + blk_queue_softirq_done(q, set->ops->complete); + + blk_mq_init_cpu_queues(q, set->nr_hw_queues); + + if (blk_mq_init_hw_queues(q, set)) + goto err_hctxs; + + mutex_lock(&all_q_mutex); + list_add_tail(&q->all_q_node, &all_q_list); + mutex_unlock(&all_q_mutex); + + blk_mq_add_queue_tag_set(set, q); + + blk_mq_map_swqueue(q); + + return q; + +err_hctxs: + kfree(map); + for (i = 0; i < set->nr_hw_queues; i++) { + if (!hctxs[i]) + break; + free_cpumask_var(hctxs[i]->cpumask); + kfree(hctxs[i]); + } +err_map: + kfree(hctxs); +err_percpu: + free_percpu(ctx); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(blk_mq_init_allocated_queue); + +void blk_mq_free_queue(struct request_queue *q) +{ + struct blk_mq_tag_set *set = q->tag_set; + + blk_mq_del_queue_tag_set(q); + + blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); + blk_mq_free_hw_queues(q, set); + + percpu_ref_exit(&q->mq_usage_counter); + + kfree(q->mq_map); + + q->mq_map = NULL; + + mutex_lock(&all_q_mutex); + list_del_init(&q->all_q_node); + mutex_unlock(&all_q_mutex); +} + +/* Basically redo blk_mq_init_queue with queue frozen */ +static void blk_mq_queue_reinit(struct request_queue *q) +{ + WARN_ON_ONCE(!q->mq_freeze_depth); + + blk_mq_sysfs_unregister(q); + + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); + + /* + * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe + * we should change hctx numa_node according to new topology (this + * involves free and re-allocate memory, worthy doing?) + */ + + blk_mq_map_swqueue(q); + + blk_mq_sysfs_register(q); +} + +static int blk_mq_queue_reinit_notify(struct notifier_block *nb, + unsigned long action, void *hcpu) +{ + struct request_queue *q; + + /* + * Before new mappings are established, hotadded cpu might already + * start handling requests. This doesn't break anything as we map + * offline CPUs to first hardware queue. We will re-init the queue + * below to get optimal settings. + */ + if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && + action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) + return NOTIFY_OK; + + mutex_lock(&all_q_mutex); + + /* + * We need to freeze and reinit all existing queues. Freezing + * involves synchronous wait for an RCU grace period and doing it + * one by one may take a long time. Start freezing all queues in + * one swoop and then wait for the completions so that freezing can + * take place in parallel. 
+ */ + list_for_each_entry(q, &all_q_list, all_q_node) + blk_mq_freeze_queue_start(q); + list_for_each_entry(q, &all_q_list, all_q_node) { + blk_mq_freeze_queue_wait(q); + + /* + * timeout handler can't touch hw queue during the + * reinitialization + */ + del_timer_sync(&q->timeout); + } + + list_for_each_entry(q, &all_q_list, all_q_node) + blk_mq_queue_reinit(q); + + list_for_each_entry(q, &all_q_list, all_q_node) + blk_mq_unfreeze_queue(q); + + mutex_unlock(&all_q_mutex); + return NOTIFY_OK; +} + +static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) +{ + int i; + + for (i = 0; i < set->nr_hw_queues; i++) { + set->tags[i] = blk_mq_init_rq_map(set, i); + if (!set->tags[i]) + goto out_unwind; + } + + return 0; + +out_unwind: + while (--i >= 0) + blk_mq_free_rq_map(set, set->tags[i], i); + + return -ENOMEM; +} + +/* + * Allocate the request maps associated with this tag_set. Note that this + * may reduce the depth asked for, if memory is tight. set->queue_depth + * will be updated to reflect the allocated depth. + */ +static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) +{ + unsigned int depth; + int err; + + depth = set->queue_depth; + do { + err = __blk_mq_alloc_rq_maps(set); + if (!err) + break; + + set->queue_depth >>= 1; + if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { + err = -ENOMEM; + break; + } + } while (set->queue_depth); + + if (!set->queue_depth || err) { + pr_err("blk-mq: failed to allocate request map\n"); + return -ENOMEM; + } + + if (depth != set->queue_depth) + pr_info("blk-mq: reduced tag depth (%u -> %u)\n", + depth, set->queue_depth); + + return 0; +} + +/* + * Alloc a tag set to be associated with one or more request queues. + * May fail with EINVAL for various error conditions. May adjust the + * requested depth down, if if it too large. In that case, the set + * value will be stored in set->queue_depth. + */ +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) +{ + BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); + + if (!set->nr_hw_queues) + return -EINVAL; + if (!set->queue_depth) + return -EINVAL; + if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) + return -EINVAL; + + if (!set->ops->queue_rq || !set->ops->map_queue) + return -EINVAL; + + if (set->queue_depth > BLK_MQ_MAX_DEPTH) { + pr_info("blk-mq: reduced tag depth to %u\n", + BLK_MQ_MAX_DEPTH); + set->queue_depth = BLK_MQ_MAX_DEPTH; + } + + /* + * If a crashdump is active, then we are potentially in a very + * memory constrained environment. Limit us to 1 queue and + * 64 tags to prevent using too much memory. 
+ */ + if (is_kdump_kernel()) { + set->nr_hw_queues = 1; + set->queue_depth = min(64U, set->queue_depth); + } + + set->tags = kmalloc_node(set->nr_hw_queues * + sizeof(struct blk_mq_tags *), + GFP_KERNEL, set->numa_node); + if (!set->tags) + return -ENOMEM; + + if (blk_mq_alloc_rq_maps(set)) + goto enomem; + + mutex_init(&set->tag_list_lock); + INIT_LIST_HEAD(&set->tag_list); + + return 0; +enomem: + kfree(set->tags); + set->tags = NULL; + return -ENOMEM; +} +EXPORT_SYMBOL(blk_mq_alloc_tag_set); + +void blk_mq_free_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + for (i = 0; i < set->nr_hw_queues; i++) { + if (set->tags[i]) + blk_mq_free_rq_map(set, set->tags[i], i); + } + + kfree(set->tags); + set->tags = NULL; +} +EXPORT_SYMBOL(blk_mq_free_tag_set); + +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct blk_mq_tag_set *set = q->tag_set; + struct blk_mq_hw_ctx *hctx; + int i, ret; + + if (!set || nr > set->queue_depth) + return -EINVAL; + + ret = 0; + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_tag_update_depth(hctx->tags, nr); + if (ret) + break; + } + + if (!ret) + q->nr_requests = nr; + + return ret; +} + +void blk_mq_disable_hotplug(void) +{ + mutex_lock(&all_q_mutex); +} + +void blk_mq_enable_hotplug(void) +{ + mutex_unlock(&all_q_mutex); +} + +static int __init blk_mq_init(void) +{ + blk_mq_cpu_init(); + + hotcpu_notifier(blk_mq_queue_reinit_notify, 0); + + return 0; +} +subsys_initcall(blk_mq_init); diff --git a/block/blk-mq.h b/block/blk-mq.h new file mode 100644 index 000000000..6a48c4c0d --- /dev/null +++ b/block/blk-mq.h @@ -0,0 +1,126 @@ +#ifndef INT_BLK_MQ_H +#define INT_BLK_MQ_H + +struct blk_mq_tag_set; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + unsigned int last_tag ____cacheline_aligned_in_smp; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +void __blk_mq_complete_request(struct request *rq); +void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); +void blk_mq_freeze_queue(struct request_queue *q); +void blk_mq_free_queue(struct request_queue *q); +void blk_mq_clone_flush_request(struct request *flush_rq, + struct request *orig_rq); +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); +void blk_mq_wake_waiters(struct request_queue *q); + +/* + * CPU hotplug helpers + */ +struct blk_mq_cpu_notifier; +void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, + int (*fn)(void *, unsigned long, unsigned int), + void *data); +void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); +void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); +void blk_mq_cpu_init(void); +void blk_mq_enable_hotplug(void); +void blk_mq_disable_hotplug(void); + +/* + * CPU -> queue mappings + */ +extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); +extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); + +/* + * sysfs helpers + */ +extern int blk_mq_sysfs_register(struct request_queue *q); +extern void blk_mq_sysfs_unregister(struct request_queue *q); + +extern void blk_mq_rq_timed_out(struct request *req, 
bool reserved); + +void blk_mq_release(struct request_queue *q); + +/* + * Basic implementation of sparser bitmap, allowing the user to spread + * the bits over more cachelines. + */ +struct blk_align_bitmap { + unsigned long word; + unsigned long depth; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + unsigned int cpu) +{ + return per_cpu_ptr(q->queue_ctx, cpu); +} + +/* + * This assumes per-cpu software queueing queues. They could be per-node + * as well, for instance. For now this is hardcoded as-is. Note that we don't + * care about preemption, since we know the ctx's are persistent. This does + * mean that we can't rely on ctx always matching the currently running CPU. + */ +static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) +{ + return __blk_mq_get_ctx(q, get_cpu()); +} + +static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) +{ + put_cpu(); +} + +struct blk_mq_alloc_data { + /* input parameter */ + struct request_queue *q; + gfp_t gfp; + bool reserved; + + /* input & output parameter */ + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data, + struct request_queue *q, gfp_t gfp, bool reserved, + struct blk_mq_ctx *ctx, + struct blk_mq_hw_ctx *hctx) +{ + data->q = q; + data->gfp = gfp; + data->reserved = reserved; + data->ctx = ctx; + data->hctx = hctx; +} + +static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) +{ + return hctx->nr_ctx && hctx->tags; +} + +#endif diff --git a/block/blk-settings.c b/block/blk-settings.c new file mode 100644 index 000000000..12600bfff --- /dev/null +++ b/block/blk-settings.c @@ -0,0 +1,861 @@ +/* + * Functions related to setting various queue properties from drivers + */ +#include +#include +#include +#include +#include +#include /* for max_pfn/max_low_pfn */ +#include +#include +#include +#include + +#include "blk.h" + +unsigned long blk_max_low_pfn; +EXPORT_SYMBOL(blk_max_low_pfn); + +unsigned long blk_max_pfn; + +/** + * blk_queue_prep_rq - set a prepare_request function for queue + * @q: queue + * @pfn: prepare_request function + * + * It's possible for a queue to register a prepare_request callback which + * is invoked before the request is handed to the request_fn. The goal of + * the function is to prepare a request for I/O, it can be used to build a + * cdb from the request data for instance. + * + */ +void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) +{ + q->prep_rq_fn = pfn; +} +EXPORT_SYMBOL(blk_queue_prep_rq); + +/** + * blk_queue_unprep_rq - set an unprepare_request function for queue + * @q: queue + * @ufn: unprepare_request function + * + * It's possible for a queue to register an unprepare_request callback + * which is invoked before the request is finally completed. The goal + * of the function is to deallocate any data that was allocated in the + * prepare_request callback. + * + */ +void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) +{ + q->unprep_rq_fn = ufn; +} +EXPORT_SYMBOL(blk_queue_unprep_rq); + +/** + * blk_queue_merge_bvec - set a merge_bvec function for queue + * @q: queue + * @mbfn: merge_bvec_fn + * + * Usually queues have static limitations on the max sectors or segments that + * we can put in a request. Stacking drivers may have some settings that + * are dynamic, and thus we have to query the queue whether it is ok to + * add a new bio_vec to a bio at a given offset or not. 
If the block device + * has such limitations, it needs to register a merge_bvec_fn to control + * the size of bio's sent to it. Note that a block device *must* allow a + * single page to be added to an empty bio. The block device driver may want + * to use the bio_split() function to deal with these bio's. By default + * no merge_bvec_fn is defined for a queue, and only the fixed limits are + * honored. + */ +void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) +{ + q->merge_bvec_fn = mbfn; +} +EXPORT_SYMBOL(blk_queue_merge_bvec); + +void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) +{ + q->softirq_done_fn = fn; +} +EXPORT_SYMBOL(blk_queue_softirq_done); + +void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) +{ + q->rq_timeout = timeout; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); + +void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) +{ + q->rq_timed_out_fn = fn; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); + +void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) +{ + q->lld_busy_fn = fn; +} +EXPORT_SYMBOL_GPL(blk_queue_lld_busy); + +/** + * blk_set_default_limits - reset limits to default values + * @lim: the queue_limits structure to reset + * + * Description: + * Returns a queue_limit struct to its default state. + */ +void blk_set_default_limits(struct queue_limits *lim) +{ + lim->max_segments = BLK_MAX_SEGMENTS; + lim->max_integrity_segments = 0; + lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; + lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; + lim->chunk_sectors = 0; + lim->max_write_same_sectors = 0; + lim->max_discard_sectors = 0; + lim->discard_granularity = 0; + lim->discard_alignment = 0; + lim->discard_misaligned = 0; + lim->discard_zeroes_data = 0; + lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; + lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); + lim->alignment_offset = 0; + lim->io_opt = 0; + lim->misaligned = 0; + lim->cluster = 1; +} +EXPORT_SYMBOL(blk_set_default_limits); + +/** + * blk_set_stacking_limits - set default limits for stacking devices + * @lim: the queue_limits structure to reset + * + * Description: + * Returns a queue_limit struct to its default state. Should be used + * by stacking drivers like DM that have no internal limits. + */ +void blk_set_stacking_limits(struct queue_limits *lim) +{ + blk_set_default_limits(lim); + + /* Inherit limits from component devices */ + lim->discard_zeroes_data = 1; + lim->max_segments = USHRT_MAX; + lim->max_hw_sectors = UINT_MAX; + lim->max_segment_size = UINT_MAX; + lim->max_sectors = UINT_MAX; + lim->max_write_same_sectors = UINT_MAX; +} +EXPORT_SYMBOL(blk_set_stacking_limits); + +/** + * blk_queue_make_request - define an alternate make_request function for a device + * @q: the request queue for the device to be affected + * @mfn: the alternate make_request function + * + * Description: + * The normal way for &struct bios to be passed to a device + * driver is for them to be collected into requests on a request + * queue, and then to allow the device driver to select requests + * off that queue when it is ready. This works well for many block + * devices. However some block devices (typically virtual devices + * such as md or lvm) do not benefit from the processing on the + * request queue, and are served best by having the requests passed + * directly to them. 
This can be achieved by providing a function + * to blk_queue_make_request(). + * + * Caveat: + * The driver that does this *must* be able to deal appropriately + * with buffers in "highmemory". This can be accomplished by either calling + * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling + * blk_queue_bounce() to create a buffer in normal memory. + **/ +void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) +{ + /* + * set defaults + */ + q->nr_requests = BLKDEV_MAX_RQ; + + q->make_request_fn = mfn; + blk_queue_dma_alignment(q, 511); + blk_queue_congestion_threshold(q); + q->nr_batching = BLK_BATCH_REQ; + + blk_set_default_limits(&q->limits); + + /* + * by default assume old behaviour and bounce for any highmem page + */ + blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); +} +EXPORT_SYMBOL(blk_queue_make_request); + +/** + * blk_queue_bounce_limit - set bounce buffer limit for queue + * @q: the request queue for the device + * @max_addr: the maximum address the device can handle + * + * Description: + * Different hardware can have different requirements as to what pages + * it can do I/O directly to. A low level driver can call + * blk_queue_bounce_limit to have lower memory pages allocated as bounce + * buffers for doing I/O to pages residing above @max_addr. + **/ +void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) +{ + unsigned long b_pfn = max_addr >> PAGE_SHIFT; + int dma = 0; + + q->bounce_gfp = GFP_NOIO; +#if BITS_PER_LONG == 64 + /* + * Assume anything <= 4GB can be handled by IOMMU. Actually + * some IOMMUs can handle everything, but I don't know of a + * way to test this here. + */ + if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) + dma = 1; + q->limits.bounce_pfn = max(max_low_pfn, b_pfn); +#else + if (b_pfn < blk_max_low_pfn) + dma = 1; + q->limits.bounce_pfn = b_pfn; +#endif + if (dma) { + init_emergency_isa_pool(); + q->bounce_gfp = GFP_NOIO | GFP_DMA; + q->limits.bounce_pfn = b_pfn; + } +} +EXPORT_SYMBOL(blk_queue_bounce_limit); + +/** + * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request + * @limits: the queue limits + * @max_hw_sectors: max hardware sectors in the usual 512b unit + * + * Description: + * Enables a low level driver to set a hard upper limit, + * max_hw_sectors, on the size of requests. max_hw_sectors is set by + * the device driver based upon the combined capabilities of I/O + * controller and storage device. + * + * max_sectors is a soft limit imposed by the block layer for + * filesystem type requests. This value can be overridden on a + * per-device basis in /sys/block//queue/max_sectors_kb. + * The soft limit can not exceed max_hw_sectors. + **/ +void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) +{ + if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { + max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); + printk(KERN_INFO "%s: set to minimum %d\n", + __func__, max_hw_sectors); + } + + limits->max_sectors = limits->max_hw_sectors = max_hw_sectors; +} +EXPORT_SYMBOL(blk_limits_max_hw_sectors); + +/** + * blk_queue_max_hw_sectors - set max sectors for a request for this queue + * @q: the request queue for the device + * @max_hw_sectors: max hardware sectors in the usual 512b unit + * + * Description: + * See description for blk_limits_max_hw_sectors(). 
+ **/ +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) +{ + blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); +} +EXPORT_SYMBOL(blk_queue_max_hw_sectors); + +/** + * blk_queue_chunk_sectors - set size of the chunk for this queue + * @q: the request queue for the device + * @chunk_sectors: chunk sectors in the usual 512b unit + * + * Description: + * If a driver doesn't want IOs to cross a given chunk size, it can set + * this limit and prevent merging across chunks. Note that the chunk size + * must currently be a power-of-2 in sectors. Also note that the block + * layer must accept a page worth of data at any offset. So if the + * crossing of chunks is a hard limitation in the driver, it must still be + * prepared to split single page bios. + **/ +void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) +{ + BUG_ON(!is_power_of_2(chunk_sectors)); + q->limits.chunk_sectors = chunk_sectors; +} +EXPORT_SYMBOL(blk_queue_chunk_sectors); + +/** + * blk_queue_max_discard_sectors - set max sectors for a single discard + * @q: the request queue for the device + * @max_discard_sectors: maximum number of sectors to discard + **/ +void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors) +{ + q->limits.max_discard_sectors = max_discard_sectors; +} +EXPORT_SYMBOL(blk_queue_max_discard_sectors); + +/** + * blk_queue_max_write_same_sectors - set max sectors for a single write same + * @q: the request queue for the device + * @max_write_same_sectors: maximum number of sectors to write per command + **/ +void blk_queue_max_write_same_sectors(struct request_queue *q, + unsigned int max_write_same_sectors) +{ + q->limits.max_write_same_sectors = max_write_same_sectors; +} +EXPORT_SYMBOL(blk_queue_max_write_same_sectors); + +/** + * blk_queue_max_segments - set max hw segments for a request for this queue + * @q: the request queue for the device + * @max_segments: max number of segments + * + * Description: + * Enables a low level driver to set an upper limit on the number of + * hw data segments in a request. + **/ +void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) +{ + if (!max_segments) { + max_segments = 1; + printk(KERN_INFO "%s: set to minimum %d\n", + __func__, max_segments); + } + + q->limits.max_segments = max_segments; +} +EXPORT_SYMBOL(blk_queue_max_segments); + +/** + * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg + * @q: the request queue for the device + * @max_size: max size of segment in bytes + * + * Description: + * Enables a low level driver to set an upper limit on the size of a + * coalesced segment + **/ +void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) +{ + if (max_size < PAGE_CACHE_SIZE) { + max_size = PAGE_CACHE_SIZE; + printk(KERN_INFO "%s: set to minimum %d\n", + __func__, max_size); + } + + q->limits.max_segment_size = max_size; +} +EXPORT_SYMBOL(blk_queue_max_segment_size); + +/** + * blk_queue_logical_block_size - set logical block size for the queue + * @q: the request queue for the device + * @size: the logical block size, in bytes + * + * Description: + * This should be set to the lowest possible block size that the + * storage device can address. The default of 512 covers most + * hardware. 
+ **/ +void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) +{ + q->limits.logical_block_size = size; + + if (q->limits.physical_block_size < size) + q->limits.physical_block_size = size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_logical_block_size); + +/** + * blk_queue_physical_block_size - set physical block size for the queue + * @q: the request queue for the device + * @size: the physical block size, in bytes + * + * Description: + * This should be set to the lowest possible sector size that the + * hardware can operate on without reverting to read-modify-write + * operations. + */ +void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) +{ + q->limits.physical_block_size = size; + + if (q->limits.physical_block_size < q->limits.logical_block_size) + q->limits.physical_block_size = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_physical_block_size); + +/** + * blk_queue_alignment_offset - set physical block alignment offset + * @q: the request queue for the device + * @offset: alignment offset in bytes + * + * Description: + * Some devices are naturally misaligned to compensate for things like + * the legacy DOS partition table 63-sector offset. Low-level drivers + * should call this function for devices whose first sector is not + * naturally aligned. + */ +void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) +{ + q->limits.alignment_offset = + offset & (q->limits.physical_block_size - 1); + q->limits.misaligned = 0; +} +EXPORT_SYMBOL(blk_queue_alignment_offset); + +/** + * blk_limits_io_min - set minimum request size for a device + * @limits: the queue limits + * @min: smallest I/O size in bytes + * + * Description: + * Some devices have an internal block size bigger than the reported + * hardware sector size. This function can be used to signal the + * smallest I/O the device can perform without incurring a performance + * penalty. + */ +void blk_limits_io_min(struct queue_limits *limits, unsigned int min) +{ + limits->io_min = min; + + if (limits->io_min < limits->logical_block_size) + limits->io_min = limits->logical_block_size; + + if (limits->io_min < limits->physical_block_size) + limits->io_min = limits->physical_block_size; +} +EXPORT_SYMBOL(blk_limits_io_min); + +/** + * blk_queue_io_min - set minimum request size for the queue + * @q: the request queue for the device + * @min: smallest I/O size in bytes + * + * Description: + * Storage devices may report a granularity or preferred minimum I/O + * size which is the smallest request the device can perform without + * incurring a performance penalty. For disk drives this is often the + * physical block size. For RAID arrays it is often the stripe chunk + * size. A properly aligned multiple of minimum_io_size is the + * preferred request size for workloads where a high number of I/O + * operations is desired. + */ +void blk_queue_io_min(struct request_queue *q, unsigned int min) +{ + blk_limits_io_min(&q->limits, min); +} +EXPORT_SYMBOL(blk_queue_io_min); + +/** + * blk_limits_io_opt - set optimal request size for a device + * @limits: the queue limits + * @opt: smallest I/O size in bytes + * + * Description: + * Storage devices may report an optimal I/O size, which is the + * device's preferred unit for sustained I/O. 
This is rarely reported + * for disk drives. For RAID arrays it is usually the stripe width or + * the internal track size. A properly aligned multiple of + * optimal_io_size is the preferred request size for workloads where + * sustained throughput is desired. + */ +void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) +{ + limits->io_opt = opt; +} +EXPORT_SYMBOL(blk_limits_io_opt); + +/** + * blk_queue_io_opt - set optimal request size for the queue + * @q: the request queue for the device + * @opt: optimal request size in bytes + * + * Description: + * Storage devices may report an optimal I/O size, which is the + * device's preferred unit for sustained I/O. This is rarely reported + * for disk drives. For RAID arrays it is usually the stripe width or + * the internal track size. A properly aligned multiple of + * optimal_io_size is the preferred request size for workloads where + * sustained throughput is desired. + */ +void blk_queue_io_opt(struct request_queue *q, unsigned int opt) +{ + blk_limits_io_opt(&q->limits, opt); +} +EXPORT_SYMBOL(blk_queue_io_opt); + +/** + * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers + * @t: the stacking driver (top) + * @b: the underlying device (bottom) + **/ +void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) +{ + blk_stack_limits(&t->limits, &b->limits, 0); +} +EXPORT_SYMBOL(blk_queue_stack_limits); + +/** + * blk_stack_limits - adjust queue_limits for stacked devices + * @t: the stacking driver limits (top device) + * @b: the underlying queue limits (bottom, component device) + * @start: first data sector within component device + * + * Description: + * This function is used by stacking drivers like MD and DM to ensure + * that all component devices have compatible block sizes and + * alignments. The stacking driver must provide a queue_limits + * struct (top) and then iteratively call the stacking function for + * all component (bottom) devices. The stacking function will + * attempt to combine the values and ensure proper alignment. + * + * Returns 0 if the top and bottom queue_limits are compatible. The + * top device's block sizes and alignment offsets may be adjusted to + * ensure alignment with the bottom device. If no compatible sizes + * and alignments exist, -1 is returned and the resulting top + * queue_limits will have the misaligned flag set to indicate that + * the alignment_offset is undefined. + */ +int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t start) +{ + unsigned int top, bottom, alignment, ret = 0; + + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); + t->max_write_same_sectors = min(t->max_write_same_sectors, + b->max_write_same_sectors); + t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); + + t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, + b->seg_boundary_mask); + + t->max_segments = min_not_zero(t->max_segments, b->max_segments); + t->max_integrity_segments = min_not_zero(t->max_integrity_segments, + b->max_integrity_segments); + + t->max_segment_size = min_not_zero(t->max_segment_size, + b->max_segment_size); + + t->misaligned |= b->misaligned; + + alignment = queue_limit_alignment_offset(b, start); + + /* Bottom device has different alignment. Check that it is + * compatible with the current top alignment. 
+ */ + if (t->alignment_offset != alignment) { + + top = max(t->physical_block_size, t->io_min) + + t->alignment_offset; + bottom = max(b->physical_block_size, b->io_min) + alignment; + + /* Verify that top and bottom intervals line up */ + if (max(top, bottom) % min(top, bottom)) { + t->misaligned = 1; + ret = -1; + } + } + + t->logical_block_size = max(t->logical_block_size, + b->logical_block_size); + + t->physical_block_size = max(t->physical_block_size, + b->physical_block_size); + + t->io_min = max(t->io_min, b->io_min); + t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); + + t->cluster &= b->cluster; + t->discard_zeroes_data &= b->discard_zeroes_data; + + /* Physical block size a multiple of the logical block size? */ + if (t->physical_block_size & (t->logical_block_size - 1)) { + t->physical_block_size = t->logical_block_size; + t->misaligned = 1; + ret = -1; + } + + /* Minimum I/O a multiple of the physical block size? */ + if (t->io_min & (t->physical_block_size - 1)) { + t->io_min = t->physical_block_size; + t->misaligned = 1; + ret = -1; + } + + /* Optimal I/O a multiple of the physical block size? */ + if (t->io_opt & (t->physical_block_size - 1)) { + t->io_opt = 0; + t->misaligned = 1; + ret = -1; + } + + t->raid_partial_stripes_expensive = + max(t->raid_partial_stripes_expensive, + b->raid_partial_stripes_expensive); + + /* Find lowest common alignment_offset */ + t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) + % max(t->physical_block_size, t->io_min); + + /* Verify that new alignment_offset is on a logical block boundary */ + if (t->alignment_offset & (t->logical_block_size - 1)) { + t->misaligned = 1; + ret = -1; + } + + /* Discard alignment and granularity */ + if (b->discard_granularity) { + alignment = queue_limit_discard_alignment(b, start); + + if (t->discard_granularity != 0 && + t->discard_alignment != alignment) { + top = t->discard_granularity + t->discard_alignment; + bottom = b->discard_granularity + alignment; + + /* Verify that top and bottom intervals line up */ + if ((max(top, bottom) % min(top, bottom)) != 0) + t->discard_misaligned = 1; + } + + t->max_discard_sectors = min_not_zero(t->max_discard_sectors, + b->max_discard_sectors); + t->discard_granularity = max(t->discard_granularity, + b->discard_granularity); + t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % + t->discard_granularity; + } + + return ret; +} +EXPORT_SYMBOL(blk_stack_limits); + +/** + * bdev_stack_limits - adjust queue limits for stacked drivers + * @t: the stacking driver limits (top device) + * @bdev: the component block_device (bottom) + * @start: first data sector within component device + * + * Description: + * Merges queue limits for a top device and a block_device. Returns + * 0 if alignment didn't change. Returns -1 if adding the bottom + * device caused misalignment. + */ +int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t start) +{ + struct request_queue *bq = bdev_get_queue(bdev); + + start += get_start_sect(bdev); + + return blk_stack_limits(t, &bq->limits, start); +} +EXPORT_SYMBOL(bdev_stack_limits); + +/** + * disk_stack_limits - adjust queue limits for stacked drivers + * @disk: MD/DM gendisk (top) + * @bdev: the underlying block device (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges the limits for a top level gendisk and a bottom level + * block_device. 
+ */ +void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset) +{ + struct request_queue *t = disk->queue; + + if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { + char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; + + disk_name(disk, 0, top); + bdevname(bdev, bottom); + + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } +} +EXPORT_SYMBOL(disk_stack_limits); + +/** + * blk_queue_dma_pad - set pad mask + * @q: the request queue for the device + * @mask: pad mask + * + * Set dma pad mask. + * + * Appending pad buffer to a request modifies the last entry of a + * scatter list such that it includes the pad buffer. + **/ +void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) +{ + q->dma_pad_mask = mask; +} +EXPORT_SYMBOL(blk_queue_dma_pad); + +/** + * blk_queue_update_dma_pad - update pad mask + * @q: the request queue for the device + * @mask: pad mask + * + * Update dma pad mask. + * + * Appending pad buffer to a request modifies the last entry of a + * scatter list such that it includes the pad buffer. + **/ +void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) +{ + if (mask > q->dma_pad_mask) + q->dma_pad_mask = mask; +} +EXPORT_SYMBOL(blk_queue_update_dma_pad); + +/** + * blk_queue_dma_drain - Set up a drain buffer for excess dma. + * @q: the request queue for the device + * @dma_drain_needed: fn which returns non-zero if drain is necessary + * @buf: physically contiguous buffer + * @size: size of the buffer in bytes + * + * Some devices have excess DMA problems and can't simply discard (or + * zero fill) the unwanted piece of the transfer. They have to have a + * real area of memory to transfer it into. The use case for this is + * ATAPI devices in DMA mode. If the packet command causes a transfer + * bigger than the transfer size some HBAs will lock up if there + * aren't DMA elements to contain the excess transfer. What this API + * does is adjust the queue so that the buf is always appended + * silently to the scatterlist. + * + * Note: This routine adjusts max_hw_segments to make room for appending + * the drain buffer. If you call blk_queue_max_segments() after calling + * this routine, you must set the limit to one fewer than your device + * can support otherwise there won't be room for the drain buffer. + */ +int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size) +{ + if (queue_max_segments(q) < 2) + return -EINVAL; + /* make room for appending the drain */ + blk_queue_max_segments(q, queue_max_segments(q) - 1); + q->dma_drain_needed = dma_drain_needed; + q->dma_drain_buffer = buf; + q->dma_drain_size = size; + + return 0; +} +EXPORT_SYMBOL_GPL(blk_queue_dma_drain); + +/** + * blk_queue_segment_boundary - set boundary rules for segment merging + * @q: the request queue for the device + * @mask: the memory boundary mask + **/ +void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) +{ + if (mask < PAGE_CACHE_SIZE - 1) { + mask = PAGE_CACHE_SIZE - 1; + printk(KERN_INFO "%s: set to minimum %lx\n", + __func__, mask); + } + + q->limits.seg_boundary_mask = mask; +} +EXPORT_SYMBOL(blk_queue_segment_boundary); + +/** + * blk_queue_dma_alignment - set dma length and memory alignment + * @q: the request queue for the device + * @mask: alignment mask + * + * description: + * set required memory and length alignment for direct dma transactions. 
+ * this is used when building direct io requests for the queue. + * + **/ +void blk_queue_dma_alignment(struct request_queue *q, int mask) +{ + q->dma_alignment = mask; +} +EXPORT_SYMBOL(blk_queue_dma_alignment); + +/** + * blk_queue_update_dma_alignment - update dma length and memory alignment + * @q: the request queue for the device + * @mask: alignment mask + * + * description: + * update required memory and length alignment for direct dma transactions. + * If the requested alignment is larger than the current alignment, then + * the current queue alignment is updated to the new value, otherwise it + * is left alone. The design of this is to allow multiple objects + * (driver, device, transport etc) to set their respective + * alignments without having them interfere. + * + **/ +void blk_queue_update_dma_alignment(struct request_queue *q, int mask) +{ + BUG_ON(mask > PAGE_SIZE); + + if (mask > q->dma_alignment) + q->dma_alignment = mask; +} +EXPORT_SYMBOL(blk_queue_update_dma_alignment); + +/** + * blk_queue_flush - configure queue's cache flush capability + * @q: the request queue for the device + * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA + * + * Tell block layer cache flush capability of @q. If it supports + * flushing, REQ_FLUSH should be set. If it supports bypassing + * write cache for individual writes, REQ_FUA should be set. + */ +void blk_queue_flush(struct request_queue *q, unsigned int flush) +{ + WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA)); + + if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA))) + flush &= ~REQ_FUA; + + q->flush_flags = flush & (REQ_FLUSH | REQ_FUA); +} +EXPORT_SYMBOL_GPL(blk_queue_flush); + +void blk_queue_flush_queueable(struct request_queue *q, bool queueable) +{ + q->flush_not_queueable = !queueable; +} +EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); + +static int __init blk_settings_init(void) +{ + blk_max_low_pfn = max_low_pfn - 1; + blk_max_pfn = max_pfn - 1; + return 0; +} +subsys_initcall(blk_settings_init); diff --git a/block/blk-softirq.c b/block/blk-softirq.c new file mode 100644 index 000000000..53b1737e9 --- /dev/null +++ b/block/blk-softirq.c @@ -0,0 +1,186 @@ +/* + * Functions related to softirq rq completions + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "blk.h" + +static DEFINE_PER_CPU(struct list_head, blk_cpu_done); + +/* + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. + */ +static void blk_done_softirq(struct softirq_action *h) +{ + struct list_head *cpu_list, local_list; + + local_irq_disable(); + cpu_list = this_cpu_ptr(&blk_cpu_done); + list_replace_init(cpu_list, &local_list); + local_irq_enable(); + + while (!list_empty(&local_list)) { + struct request *rq; + + rq = list_entry(local_list.next, struct request, ipi_list); + list_del_init(&rq->ipi_list); + rq->q->softirq_done_fn(rq); + } +} + +#ifdef CONFIG_SMP +static void trigger_softirq(void *data) +{ + struct request *rq = data; + unsigned long flags; + struct list_head *list; + + local_irq_save(flags); + list = this_cpu_ptr(&blk_cpu_done); + list_add_tail(&rq->ipi_list, list); + + if (list->next == &rq->ipi_list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); +} + +/* + * Setup and invoke a run of 'trigger_softirq' on the given cpu. 
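For blk_queue_flush(), a short sketch of how a driver could advertise its write-cache behaviour at probe time; struct my_dev and its fields are assumptions used only for illustration:

static void my_dev_set_cache_flags(struct my_dev *dev, struct request_queue *q)
{
	if (!dev->has_volatile_cache) {
		/* write-through device: flushes are never required */
		blk_queue_flush(q, 0);
	} else if (dev->supports_fua) {
		/* cache present and individual writes can bypass it */
		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	} else {
		/* cache present, only whole-cache flushes are supported */
		blk_queue_flush(q, REQ_FLUSH);
	}
}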
+ */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + if (cpu_online(cpu)) { + struct call_single_data *data = &rq->csd; + + data->func = trigger_softirq; + data->info = rq; + data->flags = 0; + + smp_call_function_single_async(cpu, data); + return 0; + } + + return 1; +} +#else /* CONFIG_SMP */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + return 1; +} +#endif + +static int blk_cpu_notify(struct notifier_block *self, unsigned long action, + void *hcpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + int cpu = (unsigned long) hcpu; + + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_done, cpu), + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); + } + + return NOTIFY_OK; +} + +static struct notifier_block blk_cpu_notifier = { + .notifier_call = blk_cpu_notify, +}; + +void __blk_complete_request(struct request *req) +{ + int ccpu, cpu; + struct request_queue *q = req->q; + unsigned long flags; + bool shared = false; + + BUG_ON(!q->softirq_done_fn); + + local_irq_save(flags); + cpu = smp_processor_id(); + + /* + * Select completion CPU + */ + if (req->cpu != -1) { + ccpu = req->cpu; + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) + shared = cpus_share_cache(cpu, ccpu); + } else + ccpu = cpu; + + /* + * If current CPU and requested CPU share a cache, run the softirq on + * the current CPU. One might concern this is just like + * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is + * running in interrupt handler, and currently I/O controller doesn't + * support multiple interrupts, so current CPU is unique actually. This + * avoids IPI sending from current CPU to the first CPU of a group. + */ + if (ccpu == cpu || shared) { + struct list_head *list; +do_local: + list = this_cpu_ptr(&blk_cpu_done); + list_add_tail(&req->ipi_list, list); + + /* + * if the list only contains our just added request, + * signal a raise of the softirq. If there are already + * entries there, someone already raised the irq but it + * hasn't run yet. + */ + if (list->next == &req->ipi_list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + } else if (raise_blk_irq(ccpu, req)) + goto do_local; + + local_irq_restore(flags); +} + +/** + * blk_complete_request - end I/O on a request + * @req: the request being processed + * + * Description: + * Ends all I/O on a request. It does not handle partial completions, + * unless the driver actually implements this in its completion callback + * through requeueing. The actual completion happens out-of-order, + * through a softirq handler. The user must have registered a completion + * callback through blk_queue_softirq_done(). 
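To put the completion path in context, a minimal sketch of a driver using it; my_softirq_done(), my_irq_handler() and struct my_dev are assumptions, not code from this patch:

static void my_softirq_done(struct request *rq)
{
	/* runs later from BLOCK_SOFTIRQ, possibly on the submitting CPU */
	blk_end_request_all(rq, 0);
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;
	struct request *rq = dev->inflight_rq;	/* assumed bookkeeping */

	/* cheap in hardirq context: mark the request and raise the softirq */
	blk_complete_request(rq);
	return IRQ_HANDLED;
}

static void my_dev_init_queue(struct my_dev *dev)
{
	/* q->softirq_done_fn must be set before the first completion */
	blk_queue_softirq_done(dev->queue, my_softirq_done);
}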
+ **/ +void blk_complete_request(struct request *req) +{ + if (unlikely(blk_should_fake_timeout(req->q))) + return; + if (!blk_mark_rq_complete(req)) + __blk_complete_request(req); +} +EXPORT_SYMBOL(blk_complete_request); + +static __init int blk_softirq_init(void) +{ + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); + + open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); + register_hotcpu_notifier(&blk_cpu_notifier); + return 0; +} +subsys_initcall(blk_softirq_init); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c new file mode 100644 index 000000000..2b8fd302f --- /dev/null +++ b/block/blk-sysfs.c @@ -0,0 +1,612 @@ +/* + * Functions related to sysfs handling + */ +#include +#include +#include +#include +#include +#include +#include + +#include "blk.h" +#include "blk-cgroup.h" +#include "blk-mq.h" + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct request_queue *, char *); + ssize_t (*store)(struct request_queue *, const char *, size_t); +}; + +static ssize_t +queue_var_show(unsigned long var, char *page) +{ + return sprintf(page, "%lu\n", var); +} + +static ssize_t +queue_var_store(unsigned long *var, const char *page, size_t count) +{ + int err; + unsigned long v; + + err = kstrtoul(page, 10, &v); + if (err || v > UINT_MAX) + return -EINVAL; + + *var = v; + + return count; +} + +static ssize_t queue_requests_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->nr_requests, (page)); +} + +static ssize_t +queue_requests_store(struct request_queue *q, const char *page, size_t count) +{ + unsigned long nr; + int ret, err; + + if (!q->request_fn && !q->mq_ops) + return -EINVAL; + + ret = queue_var_store(&nr, page, count); + if (ret < 0) + return ret; + + if (nr < BLKDEV_MIN_RQ) + nr = BLKDEV_MIN_RQ; + + if (q->request_fn) + err = blk_update_nr_requests(q, nr); + else + err = blk_mq_update_nr_requests(q, nr); + + if (err) + return err; + + return ret; +} + +static ssize_t queue_ra_show(struct request_queue *q, char *page) +{ + unsigned long ra_kb = q->backing_dev_info.ra_pages << + (PAGE_CACHE_SHIFT - 10); + + return queue_var_show(ra_kb, (page)); +} + +static ssize_t +queue_ra_store(struct request_queue *q, const char *page, size_t count) +{ + unsigned long ra_kb; + ssize_t ret = queue_var_store(&ra_kb, page, count); + + if (ret < 0) + return ret; + + q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); + + return ret; +} + +static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) +{ + int max_sectors_kb = queue_max_sectors(q) >> 1; + + return queue_var_show(max_sectors_kb, (page)); +} + +static ssize_t queue_max_segments_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_max_segments(q), (page)); +} + +static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.max_integrity_segments, (page)); +} + +static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) +{ + if (blk_queue_cluster(q)) + return queue_var_show(queue_max_segment_size(q), (page)); + + return queue_var_show(PAGE_CACHE_SIZE, (page)); +} + +static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_logical_block_size(q), page); +} + +static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_physical_block_size(q), page); +} + +static ssize_t queue_io_min_show(struct request_queue *q, char *page) +{ + 
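Seen from userspace, the attributes defined in this file appear under /sys/block/<disk>/queue/; a small standalone sketch that reads two of them (the disk name "sda" is an assumption):

#include <stdio.h>

int main(void)
{
	unsigned long nr = 0, ra_kb = 0;
	FILE *f;

	f = fopen("/sys/block/sda/queue/nr_requests", "r");
	if (f) {
		if (fscanf(f, "%lu", &nr) != 1)
			nr = 0;
		fclose(f);
	}

	f = fopen("/sys/block/sda/queue/read_ahead_kb", "r");
	if (f) {
		if (fscanf(f, "%lu", &ra_kb) != 1)
			ra_kb = 0;
		fclose(f);
	}

	printf("nr_requests=%lu read_ahead_kb=%lu\n", nr, ra_kb);
	return 0;
}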
return queue_var_show(queue_io_min(q), page); +} + +static ssize_t queue_io_opt_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_opt(q), page); +} + +static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.discard_granularity, page); +} + +static ssize_t queue_discard_max_show(struct request_queue *q, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long)q->limits.max_discard_sectors << 9); +} + +static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_discard_zeroes_data(q), page); +} + +static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long)q->limits.max_write_same_sectors << 9); +} + + +static ssize_t +queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) +{ + unsigned long max_sectors_kb, + max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, + page_kb = 1 << (PAGE_CACHE_SHIFT - 10); + ssize_t ret = queue_var_store(&max_sectors_kb, page, count); + + if (ret < 0) + return ret; + + if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) + return -EINVAL; + + spin_lock_irq(q->queue_lock); + q->limits.max_sectors = max_sectors_kb << 1; + spin_unlock_irq(q->queue_lock); + + return ret; +} + +static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) +{ + int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; + + return queue_var_show(max_hw_sectors_kb, (page)); +} + +#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ +static ssize_t \ +queue_show_##name(struct request_queue *q, char *page) \ +{ \ + int bit; \ + bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ + return queue_var_show(neg ? 
!bit : bit, page); \ +} \ +static ssize_t \ +queue_store_##name(struct request_queue *q, const char *page, size_t count) \ +{ \ + unsigned long val; \ + ssize_t ret; \ + ret = queue_var_store(&val, page, count); \ + if (ret < 0) \ + return ret; \ + if (neg) \ + val = !val; \ + \ + spin_lock_irq(q->queue_lock); \ + if (val) \ + queue_flag_set(QUEUE_FLAG_##flag, q); \ + else \ + queue_flag_clear(QUEUE_FLAG_##flag, q); \ + spin_unlock_irq(q->queue_lock); \ + return ret; \ +} + +QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); +QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); +QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); +#undef QUEUE_SYSFS_BIT_FNS + +static ssize_t queue_nomerges_show(struct request_queue *q, char *page) +{ + return queue_var_show((blk_queue_nomerges(q) << 1) | + blk_queue_noxmerges(q), page); +} + +static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned long nm; + ssize_t ret = queue_var_store(&nm, page, count); + + if (ret < 0) + return ret; + + spin_lock_irq(q->queue_lock); + queue_flag_clear(QUEUE_FLAG_NOMERGES, q); + queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); + if (nm == 2) + queue_flag_set(QUEUE_FLAG_NOMERGES, q); + else if (nm) + queue_flag_set(QUEUE_FLAG_NOXMERGES, q); + spin_unlock_irq(q->queue_lock); + + return ret; +} + +static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) +{ + bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); + bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); + + return queue_var_show(set << force, page); +} + +static ssize_t +queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) +{ + ssize_t ret = -EINVAL; +#ifdef CONFIG_SMP + unsigned long val; + + ret = queue_var_store(&val, page, count); + if (ret < 0) + return ret; + + spin_lock_irq(q->queue_lock); + if (val == 2) { + queue_flag_set(QUEUE_FLAG_SAME_COMP, q); + queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); + } else if (val == 1) { + queue_flag_set(QUEUE_FLAG_SAME_COMP, q); + queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); + } else if (val == 0) { + queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); + queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); + } + spin_unlock_irq(q->queue_lock); +#endif + return ret; +} + +static struct queue_sysfs_entry queue_requests_entry = { + .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, + .show = queue_requests_show, + .store = queue_requests_store, +}; + +static struct queue_sysfs_entry queue_ra_entry = { + .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, + .show = queue_ra_show, + .store = queue_ra_store, +}; + +static struct queue_sysfs_entry queue_max_sectors_entry = { + .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, + .show = queue_max_sectors_show, + .store = queue_max_sectors_store, +}; + +static struct queue_sysfs_entry queue_max_hw_sectors_entry = { + .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, + .show = queue_max_hw_sectors_show, +}; + +static struct queue_sysfs_entry queue_max_segments_entry = { + .attr = {.name = "max_segments", .mode = S_IRUGO }, + .show = queue_max_segments_show, +}; + +static struct queue_sysfs_entry queue_max_integrity_segments_entry = { + .attr = {.name = "max_integrity_segments", .mode = S_IRUGO }, + .show = queue_max_integrity_segments_show, +}; + +static struct queue_sysfs_entry queue_max_segment_size_entry = { + .attr = {.name = "max_segment_size", .mode = S_IRUGO }, + .show = queue_max_segment_size_show, +}; + +static struct queue_sysfs_entry queue_iosched_entry = { + .attr = 
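Expanded by hand for one of its three users, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) generates roughly the pair below; the neg argument inverts the bit because the attribute is exposed as "rotational" while the queue flag means "non-rotational":

static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
{
	int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);

	return queue_var_show(!bit, page);	/* neg == 1 */
}

static ssize_t queue_store_nonrot(struct request_queue *q,
				  const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	if (ret < 0)
		return ret;

	val = !val;				/* neg == 1 */
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);
	return ret;
}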
{.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, + .show = elv_iosched_show, + .store = elv_iosched_store, +}; + +static struct queue_sysfs_entry queue_hw_sector_size_entry = { + .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_logical_block_size_entry = { + .attr = {.name = "logical_block_size", .mode = S_IRUGO }, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_physical_block_size_entry = { + .attr = {.name = "physical_block_size", .mode = S_IRUGO }, + .show = queue_physical_block_size_show, +}; + +static struct queue_sysfs_entry queue_io_min_entry = { + .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, + .show = queue_io_min_show, +}; + +static struct queue_sysfs_entry queue_io_opt_entry = { + .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, + .show = queue_io_opt_show, +}; + +static struct queue_sysfs_entry queue_discard_granularity_entry = { + .attr = {.name = "discard_granularity", .mode = S_IRUGO }, + .show = queue_discard_granularity_show, +}; + +static struct queue_sysfs_entry queue_discard_max_entry = { + .attr = {.name = "discard_max_bytes", .mode = S_IRUGO }, + .show = queue_discard_max_show, +}; + +static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { + .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO }, + .show = queue_discard_zeroes_data_show, +}; + +static struct queue_sysfs_entry queue_write_same_max_entry = { + .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO }, + .show = queue_write_same_max_show, +}; + +static struct queue_sysfs_entry queue_nonrot_entry = { + .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, + .show = queue_show_nonrot, + .store = queue_store_nonrot, +}; + +static struct queue_sysfs_entry queue_nomerges_entry = { + .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, + .show = queue_nomerges_show, + .store = queue_nomerges_store, +}; + +static struct queue_sysfs_entry queue_rq_affinity_entry = { + .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR }, + .show = queue_rq_affinity_show, + .store = queue_rq_affinity_store, +}; + +static struct queue_sysfs_entry queue_iostats_entry = { + .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, + .show = queue_show_iostats, + .store = queue_store_iostats, +}; + +static struct queue_sysfs_entry queue_random_entry = { + .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR }, + .show = queue_show_random, + .store = queue_store_random, +}; + +static struct attribute *default_attrs[] = { + &queue_requests_entry.attr, + &queue_ra_entry.attr, + &queue_max_hw_sectors_entry.attr, + &queue_max_sectors_entry.attr, + &queue_max_segments_entry.attr, + &queue_max_integrity_segments_entry.attr, + &queue_max_segment_size_entry.attr, + &queue_iosched_entry.attr, + &queue_hw_sector_size_entry.attr, + &queue_logical_block_size_entry.attr, + &queue_physical_block_size_entry.attr, + &queue_io_min_entry.attr, + &queue_io_opt_entry.attr, + &queue_discard_granularity_entry.attr, + &queue_discard_max_entry.attr, + &queue_discard_zeroes_data_entry.attr, + &queue_write_same_max_entry.attr, + &queue_nonrot_entry.attr, + &queue_nomerges_entry.attr, + &queue_rq_affinity_entry.attr, + &queue_iostats_entry.attr, + &queue_random_entry.attr, + NULL, +}; + +#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) + +static ssize_t +queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct queue_sysfs_entry 
*entry = to_queue(attr); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + ssize_t res; + + if (!entry->show) + return -EIO; + mutex_lock(&q->sysfs_lock); + if (blk_queue_dying(q)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->show(q, page); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t +queue_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct queue_sysfs_entry *entry = to_queue(attr); + struct request_queue *q; + ssize_t res; + + if (!entry->store) + return -EIO; + + q = container_of(kobj, struct request_queue, kobj); + mutex_lock(&q->sysfs_lock); + if (blk_queue_dying(q)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->store(q, page, length); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static void blk_free_queue_rcu(struct rcu_head *rcu_head) +{ + struct request_queue *q = container_of(rcu_head, struct request_queue, + rcu_head); + kmem_cache_free(blk_requestq_cachep, q); +} + +/** + * blk_release_queue: - release a &struct request_queue when it is no longer needed + * @kobj: the kobj belonging to the request queue to be released + * + * Description: + * blk_release_queue is the pair to blk_init_queue() or + * blk_queue_make_request(). It should be called when a request queue is + * being released; typically when a block device is being de-registered. + * Currently, its primary task it to free all the &struct request + * structures that were allocated to the queue and the queue itself. + * + * Note: + * The low level driver must have finished any outstanding requests first + * via blk_cleanup_queue(). + **/ +static void blk_release_queue(struct kobject *kobj) +{ + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + + blkcg_exit_queue(q); + + if (q->elevator) { + spin_lock_irq(q->queue_lock); + ioc_clear_queue(q); + spin_unlock_irq(q->queue_lock); + elevator_exit(q->elevator); + } + + blk_exit_rl(&q->root_rl); + + if (q->queue_tags) + __blk_queue_free_tags(q); + + if (!q->mq_ops) + blk_free_flush_queue(q->fq); + else + blk_mq_release(q); + + blk_trace_shutdown(q); + + ida_simple_remove(&blk_queue_ida, q->id); + call_rcu(&q->rcu_head, blk_free_queue_rcu); +} + +static const struct sysfs_ops queue_sysfs_ops = { + .show = queue_attr_show, + .store = queue_attr_store, +}; + +struct kobj_type blk_queue_ktype = { + .sysfs_ops = &queue_sysfs_ops, + .default_attrs = default_attrs, + .release = blk_release_queue, +}; + +int blk_register_queue(struct gendisk *disk) +{ + int ret; + struct device *dev = disk_to_dev(disk); + struct request_queue *q = disk->queue; + + if (WARN_ON(!q)) + return -ENXIO; + + /* + * SCSI probing may synchronously create and destroy a lot of + * request_queues for non-existent devices. Shutting down a fully + * functional queue takes measureable wallclock time as RCU grace + * periods are involved. To avoid excessive latency in these + * cases, a request_queue starts out in a degraded mode which is + * faster to shut down and is made fully functional here as + * request_queues for non-existent devices never get registered. 
+ */ + if (!blk_queue_init_done(q)) { + queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); + blk_queue_bypass_end(q); + if (q->mq_ops) + blk_mq_finish_init(q); + } + + ret = blk_trace_init_sysfs(dev); + if (ret) + return ret; + + ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); + if (ret < 0) { + blk_trace_remove_sysfs(dev); + return ret; + } + + kobject_uevent(&q->kobj, KOBJ_ADD); + + if (q->mq_ops) + blk_mq_register_disk(disk); + + if (!q->request_fn) + return 0; + + ret = elv_register_queue(q); + if (ret) { + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + blk_trace_remove_sysfs(dev); + kobject_put(&dev->kobj); + return ret; + } + + return 0; +} + +void blk_unregister_queue(struct gendisk *disk) +{ + struct request_queue *q = disk->queue; + + if (WARN_ON(!q)) + return; + + if (q->mq_ops) + blk_mq_unregister_disk(disk); + + if (q->request_fn) + elv_unregister_queue(q); + + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + blk_trace_remove_sysfs(disk_to_dev(disk)); + kobject_put(&disk_to_dev(disk)->kobj); +} diff --git a/block/blk-tag.c b/block/blk-tag.c new file mode 100644 index 000000000..f0344e693 --- /dev/null +++ b/block/blk-tag.c @@ -0,0 +1,402 @@ +/* + * Functions related to tagged command queuing + */ +#include +#include +#include +#include +#include + +#include "blk.h" + +/** + * blk_queue_find_tag - find a request by its tag and queue + * @q: The request queue for the device + * @tag: The tag of the request + * + * Notes: + * Should be used when a device returns a tag and you want to match + * it with a request. + * + * no locks need be held. + **/ +struct request *blk_queue_find_tag(struct request_queue *q, int tag) +{ + return blk_map_queue_find_tag(q->queue_tags, tag); +} +EXPORT_SYMBOL(blk_queue_find_tag); + +/** + * blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free + * + * Drop the reference count on @bqt and frees it when the last reference + * is dropped. + */ +void blk_free_tags(struct blk_queue_tag *bqt) +{ + if (atomic_dec_and_test(&bqt->refcnt)) { + BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < + bqt->max_depth); + + kfree(bqt->tag_index); + bqt->tag_index = NULL; + + kfree(bqt->tag_map); + bqt->tag_map = NULL; + + kfree(bqt); + } +} +EXPORT_SYMBOL(blk_free_tags); + +/** + * __blk_queue_free_tags - release tag maintenance info + * @q: the request queue for the device + * + * Notes: + * blk_cleanup_queue() will take care of calling this function, if tagging + * has been used. So there's no need to call this directly. + **/ +void __blk_queue_free_tags(struct request_queue *q) +{ + struct blk_queue_tag *bqt = q->queue_tags; + + if (!bqt) + return; + + blk_free_tags(bqt); + + q->queue_tags = NULL; + queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); +} + +/** + * blk_queue_free_tags - release tag maintenance info + * @q: the request queue for the device + * + * Notes: + * This is used to disable tagged queuing to a device, yet leave + * queue in function. 
+ **/ +void blk_queue_free_tags(struct request_queue *q) +{ + queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); +} +EXPORT_SYMBOL(blk_queue_free_tags); + +static int +init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) +{ + struct request **tag_index; + unsigned long *tag_map; + int nr_ulongs; + + if (q && depth > q->nr_requests * 2) { + depth = q->nr_requests * 2; + printk(KERN_ERR "%s: adjusted depth to %d\n", + __func__, depth); + } + + tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); + if (!tag_index) + goto fail; + + nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; + tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); + if (!tag_map) + goto fail; + + tags->real_max_depth = depth; + tags->max_depth = depth; + tags->tag_index = tag_index; + tags->tag_map = tag_map; + + return 0; +fail: + kfree(tag_index); + return -ENOMEM; +} + +static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, + int depth, int alloc_policy) +{ + struct blk_queue_tag *tags; + + tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); + if (!tags) + goto fail; + + if (init_tag_map(q, tags, depth)) + goto fail; + + atomic_set(&tags->refcnt, 1); + tags->alloc_policy = alloc_policy; + tags->next_tag = 0; + return tags; +fail: + kfree(tags); + return NULL; +} + +/** + * blk_init_tags - initialize the tag info for an external tag map + * @depth: the maximum queue depth supported + * @alloc_policy: tag allocation policy + **/ +struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy) +{ + return __blk_queue_init_tags(NULL, depth, alloc_policy); +} +EXPORT_SYMBOL(blk_init_tags); + +/** + * blk_queue_init_tags - initialize the queue tag info + * @q: the request queue for the device + * @depth: the maximum queue depth supported + * @tags: the tag to use + * @alloc_policy: tag allocation policy + * + * Queue lock must be held here if the function is called to resize an + * existing map. + **/ +int blk_queue_init_tags(struct request_queue *q, int depth, + struct blk_queue_tag *tags, int alloc_policy) +{ + int rc; + + BUG_ON(tags && q->queue_tags && tags != q->queue_tags); + + if (!tags && !q->queue_tags) { + tags = __blk_queue_init_tags(q, depth, alloc_policy); + + if (!tags) + return -ENOMEM; + + } else if (q->queue_tags) { + rc = blk_queue_resize_tags(q, depth); + if (rc) + return rc; + queue_flag_set(QUEUE_FLAG_QUEUED, q); + return 0; + } else + atomic_inc(&tags->refcnt); + + /* + * assign it, all done + */ + q->queue_tags = tags; + queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q); + INIT_LIST_HEAD(&q->tag_busy_list); + return 0; +} +EXPORT_SYMBOL(blk_queue_init_tags); + +/** + * blk_queue_resize_tags - change the queueing depth + * @q: the request queue for the device + * @new_depth: the new max command queueing depth + * + * Notes: + * Must be called with the queue lock held. + **/ +int blk_queue_resize_tags(struct request_queue *q, int new_depth) +{ + struct blk_queue_tag *bqt = q->queue_tags; + struct request **tag_index; + unsigned long *tag_map; + int max_depth, nr_ulongs; + + if (!bqt) + return -ENXIO; + + /* + * if we already have large enough real_max_depth. just + * adjust max_depth. *NOTE* as requests with tag value + * between new_depth and real_max_depth can be in-flight, tag + * map can not be shrunk blindly here. 
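A sketch of how a single-queue driver might enable tagged command queuing at probe time with the helpers above; the depth of 32 and the my_* naming are assumptions:

static int my_dev_enable_tcq(struct request_queue *q)
{
	/*
	 * NULL tag map: the block layer allocates a private one.  To share
	 * one map between several queues of the same HBA, allocate it once
	 * with blk_init_tags() and pass it here for every queue instead.
	 */
	return blk_queue_init_tags(q, 32, NULL, BLK_TAG_ALLOC_FIFO);
}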
+ */ + if (new_depth <= bqt->real_max_depth) { + bqt->max_depth = new_depth; + return 0; + } + + /* + * Currently cannot replace a shared tag map with a new + * one, so error out if this is the case + */ + if (atomic_read(&bqt->refcnt) != 1) + return -EBUSY; + + /* + * save the old state info, so we can copy it back + */ + tag_index = bqt->tag_index; + tag_map = bqt->tag_map; + max_depth = bqt->real_max_depth; + + if (init_tag_map(q, bqt, new_depth)) + return -ENOMEM; + + memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); + nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; + memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); + + kfree(tag_index); + kfree(tag_map); + return 0; +} +EXPORT_SYMBOL(blk_queue_resize_tags); + +/** + * blk_queue_end_tag - end tag operations for a request + * @q: the request queue for the device + * @rq: the request that has completed + * + * Description: + * Typically called when end_that_request_first() returns %0, meaning + * all transfers have been done for a request. It's important to call + * this function before end_that_request_last(), as that will put the + * request back on the free list thus corrupting the internal tag list. + * + * Notes: + * queue lock must be held. + **/ +void blk_queue_end_tag(struct request_queue *q, struct request *rq) +{ + struct blk_queue_tag *bqt = q->queue_tags; + unsigned tag = rq->tag; /* negative tags invalid */ + + BUG_ON(tag >= bqt->real_max_depth); + + list_del_init(&rq->queuelist); + rq->cmd_flags &= ~REQ_QUEUED; + rq->tag = -1; + + if (unlikely(bqt->tag_index[tag] == NULL)) + printk(KERN_ERR "%s: tag %d is missing\n", + __func__, tag); + + bqt->tag_index[tag] = NULL; + + if (unlikely(!test_bit(tag, bqt->tag_map))) { + printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", + __func__, tag); + return; + } + /* + * The tag_map bit acts as a lock for tag_index[bit], so we need + * unlock memory barrier semantics. + */ + clear_bit_unlock(tag, bqt->tag_map); +} +EXPORT_SYMBOL(blk_queue_end_tag); + +/** + * blk_queue_start_tag - find a free tag and assign it + * @q: the request queue for the device + * @rq: the block request that needs tagging + * + * Description: + * This can either be used as a stand-alone helper, or possibly be + * assigned as the queue &prep_rq_fn (in which case &struct request + * automagically gets a tag assigned). Note that this function + * assumes that any type of request can be queued! if this is not + * true for your device, you must check the request type before + * calling this function. The request will also be removed from + * the request queue, so it's the drivers responsibility to readd + * it if it should need to be restarted for some reason. + * + * Notes: + * queue lock must be held. + **/ +int blk_queue_start_tag(struct request_queue *q, struct request *rq) +{ + struct blk_queue_tag *bqt = q->queue_tags; + unsigned max_depth; + int tag; + + if (unlikely((rq->cmd_flags & REQ_QUEUED))) { + printk(KERN_ERR + "%s: request %p for device [%s] already tagged %d", + __func__, rq, + rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); + BUG(); + } + + /* + * Protect against shared tag maps, as we may not have exclusive + * access to the tag map. + * + * We reserve a few tags just for sync IO, since we don't want + * to starve sync IO on behalf of flooding async IO. 
+ */ + max_depth = bqt->max_depth; + if (!rq_is_sync(rq) && max_depth > 1) { + switch (max_depth) { + case 2: + max_depth = 1; + break; + case 3: + max_depth = 2; + break; + default: + max_depth -= 2; + } + if (q->in_flight[BLK_RW_ASYNC] > max_depth) + return 1; + } + + do { + if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) { + tag = find_first_zero_bit(bqt->tag_map, max_depth); + if (tag >= max_depth) + return 1; + } else { + int start = bqt->next_tag; + int size = min_t(int, bqt->max_depth, max_depth + start); + tag = find_next_zero_bit(bqt->tag_map, size, start); + if (tag >= size && start + size > bqt->max_depth) { + size = start + size - bqt->max_depth; + tag = find_first_zero_bit(bqt->tag_map, size); + } + if (tag >= size) + return 1; + } + + } while (test_and_set_bit_lock(tag, bqt->tag_map)); + /* + * We need lock ordering semantics given by test_and_set_bit_lock. + * See blk_queue_end_tag for details. + */ + + bqt->next_tag = (tag + 1) % bqt->max_depth; + rq->cmd_flags |= REQ_QUEUED; + rq->tag = tag; + bqt->tag_index[tag] = rq; + blk_start_request(rq); + list_add(&rq->queuelist, &q->tag_busy_list); + return 0; +} +EXPORT_SYMBOL(blk_queue_start_tag); + +/** + * blk_queue_invalidate_tags - invalidate all pending tags + * @q: the request queue for the device + * + * Description: + * Hardware conditions may dictate a need to stop all pending requests. + * In this case, we will safely clear the block side of the tag queue and + * readd all requests to the request queue in the right order. + * + * Notes: + * queue lock must be held. + **/ +void blk_queue_invalidate_tags(struct request_queue *q) +{ + struct list_head *tmp, *n; + + list_for_each_safe(tmp, n, &q->tag_busy_list) + blk_requeue_request(q, list_entry_rq(tmp)); +} +EXPORT_SYMBOL(blk_queue_invalidate_tags); diff --git a/block/blk-throttle.c b/block/blk-throttle.c new file mode 100644 index 000000000..5b9c6d5c3 --- /dev/null +++ b/block/blk-throttle.c @@ -0,0 +1,1699 @@ +/* + * Interface for controlling IO bandwidth on a request queue + * + * Copyright (C) 2010 Vivek Goyal + */ + +#include +#include +#include +#include +#include +#include "blk-cgroup.h" +#include "blk.h" + +/* Max dispatch from a group in 1 round */ +static int throtl_grp_quantum = 8; + +/* Total max dispatch from all groups in one round */ +static int throtl_quantum = 32; + +/* Throttling is performed over 100ms slice and after that slice is renewed */ +static unsigned long throtl_slice = HZ/10; /* 100 ms */ + +static struct blkcg_policy blkcg_policy_throtl; + +/* A workqueue to queue throttle related work */ +static struct workqueue_struct *kthrotld_workqueue; + +/* + * To implement hierarchical throttling, throtl_grps form a tree and bios + * are dispatched upwards level by level until they reach the top and get + * issued. When dispatching bios from the children and local group at each + * level, if the bios are dispatched into a single bio_list, there's a risk + * of a local or child group which can queue many bios at once filling up + * the list starving others. + * + * To avoid such starvation, dispatched bios are queued separately + * according to where they came from. When they are again dispatched to + * the parent, they're popped in round-robin order so that no single source + * hogs the dispatch window. + * + * throtl_qnode is used to keep the queued bios separated by their sources. + * Bios are queued to throtl_qnode which in turn is queued to + * throtl_service_queue and then dispatched in round-robin order. 
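Putting blk_queue_start_tag() and blk_queue_end_tag() into context, a sketch of an old-style request_fn and its completion path; my_hw_issue() and my_complete() are assumed driver hooks, not part of this patch:

/* called with q->queue_lock held */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		/* grabs a tag, starts the request and takes it off the queue */
		if (blk_queue_start_tag(q, rq))
			break;			/* no free tag, retry later */

		spin_unlock_irq(q->queue_lock);
		my_hw_issue(rq, rq->tag);	/* submit to hardware */
		spin_lock_irq(q->queue_lock);
	}
}

/* completion, also with q->queue_lock held */
static void my_complete(struct request_queue *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);		/* release the tag */
	__blk_end_request_all(rq, 0);
}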
+ * + * It's also used to track the reference counts on blkg's. A qnode always + * belongs to a throtl_grp and gets queued on itself or the parent, so + * incrementing the reference of the associated throtl_grp when a qnode is + * queued and decrementing when dequeued is enough to keep the whole blkg + * tree pinned while bios are in flight. + */ +struct throtl_qnode { + struct list_head node; /* service_queue->queued[] */ + struct bio_list bios; /* queued bios */ + struct throtl_grp *tg; /* tg this qnode belongs to */ +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; /* the parent service_queue */ + + /* + * Bios queued directly to this service_queue or dispatched from + * children throtl_grp's. + */ + struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ + unsigned int nr_queued[2]; /* number of queued bios */ + + /* + * RB tree of active children throtl_grp's, which are sorted by + * their ->disptime. + */ + struct rb_root pending_tree; /* RB tree of active tgs */ + struct rb_node *first_pending; /* first node in the tree */ + unsigned int nr_pending; /* # queued in the tree */ + unsigned long first_pending_disptime; /* disptime of the first tg */ + struct timer_list pending_timer; /* fires on first_pending_disptime */ +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ + THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ +}; + +#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) + +/* Per-cpu group stats */ +struct tg_stats_cpu { + /* total bytes transferred */ + struct blkg_rwstat service_bytes; + /* total IOs serviced, post merge */ + struct blkg_rwstat serviced; +}; + +struct throtl_grp { + /* must be the first member */ + struct blkg_policy_data pd; + + /* active throtl group service_queue member */ + struct rb_node rb_node; + + /* throtl_data this group belongs to */ + struct throtl_data *td; + + /* this group's service queue */ + struct throtl_service_queue service_queue; + + /* + * qnode_on_self is used when bios are directly queued to this + * throtl_grp so that local bios compete fairly with bios + * dispatched from children. qnode_on_parent is used when bios are + * dispatched from this throtl_grp into its parent and will compete + * with the sibling qnode_on_parents and the parent's + * qnode_on_self. + */ + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + + /* + * Dispatch time in jiffies. This is the estimated time when group + * will unthrottle and is ready to dispatch more bio. It is used as + * key to sort active groups in service tree. + */ + unsigned long disptime; + + unsigned int flags; + + /* are there any throtl rules between this group and td? 
*/ + bool has_rules[2]; + + /* bytes per second rate limits */ + uint64_t bps[2]; + + /* IOPS limits */ + unsigned int iops[2]; + + /* Number of bytes disptached in current slice */ + uint64_t bytes_disp[2]; + /* Number of bio's dispatched in current slice */ + unsigned int io_disp[2]; + + /* When did we start a new slice */ + unsigned long slice_start[2]; + unsigned long slice_end[2]; + + /* Per cpu stats pointer */ + struct tg_stats_cpu __percpu *stats_cpu; + + /* List of tgs waiting for per cpu stats memory to be allocated */ + struct list_head stats_alloc_node; +}; + +struct throtl_data +{ + /* service tree for active throtl groups */ + struct throtl_service_queue service_queue; + + struct request_queue *queue; + + /* Total Number of queued bios on READ and WRITE lists */ + unsigned int nr_queued[2]; + + /* + * number of total undestroyed groups + */ + unsigned int nr_undestroyed_grps; + + /* Work for dispatching throttled bios */ + struct work_struct dispatch_work; +}; + +/* list and work item to allocate percpu group stats */ +static DEFINE_SPINLOCK(tg_stats_alloc_lock); +static LIST_HEAD(tg_stats_alloc_list); + +static void tg_stats_alloc_fn(struct work_struct *); +static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn); + +static void throtl_pending_timer_fn(unsigned long arg); + +static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd) +{ + return pd ? container_of(pd, struct throtl_grp, pd) : NULL; +} + +static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) +{ + return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); +} + +static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) +{ + return pd_to_blkg(&tg->pd); +} + +static inline struct throtl_grp *td_root_tg(struct throtl_data *td) +{ + return blkg_to_tg(td->queue->root_blkg); +} + +/** + * sq_to_tg - return the throl_grp the specified service queue belongs to + * @sq: the throtl_service_queue of interest + * + * Return the throtl_grp @sq belongs to. If @sq is the top-level one + * embedded in throtl_data, %NULL is returned. + */ +static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) +{ + if (sq && sq->parent_sq) + return container_of(sq, struct throtl_grp, service_queue); + else + return NULL; +} + +/** + * sq_to_td - return throtl_data the specified service queue belongs to + * @sq: the throtl_service_queue of interest + * + * A service_queue can be embeded in either a throtl_grp or throtl_data. + * Determine the associated throtl_data accordingly and return it. + */ +static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) +{ + struct throtl_grp *tg = sq_to_tg(sq); + + if (tg) + return tg->td; + else + return container_of(sq, struct throtl_data, service_queue); +} + +/** + * throtl_log - log debug message via blktrace + * @sq: the service_queue being reported + * @fmt: printf format string + * @args: printf args + * + * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a + * throtl_grp; otherwise, just "throtl". + * + * TODO: this should be made a function and name formatting should happen + * after testing whether blktrace is enabled. + */ +#define throtl_log(sq, fmt, args...) 
do { \ + struct throtl_grp *__tg = sq_to_tg((sq)); \ + struct throtl_data *__td = sq_to_td((sq)); \ + \ + (void)__td; \ + if ((__tg)) { \ + char __pbuf[128]; \ + \ + blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \ + blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \ + } else { \ + blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \ + } \ +} while (0) + +static void tg_stats_init(struct tg_stats_cpu *tg_stats) +{ + blkg_rwstat_init(&tg_stats->service_bytes); + blkg_rwstat_init(&tg_stats->serviced); +} + +/* + * Worker for allocating per cpu stat for tgs. This is scheduled on the + * system_wq once there are some groups on the alloc_list waiting for + * allocation. + */ +static void tg_stats_alloc_fn(struct work_struct *work) +{ + static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */ + struct delayed_work *dwork = to_delayed_work(work); + bool empty = false; + +alloc_stats: + if (!stats_cpu) { + int cpu; + + stats_cpu = alloc_percpu(struct tg_stats_cpu); + if (!stats_cpu) { + /* allocation failed, try again after some time */ + schedule_delayed_work(dwork, msecs_to_jiffies(10)); + return; + } + for_each_possible_cpu(cpu) + tg_stats_init(per_cpu_ptr(stats_cpu, cpu)); + } + + spin_lock_irq(&tg_stats_alloc_lock); + + if (!list_empty(&tg_stats_alloc_list)) { + struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list, + struct throtl_grp, + stats_alloc_node); + swap(tg->stats_cpu, stats_cpu); + list_del_init(&tg->stats_alloc_node); + } + + empty = list_empty(&tg_stats_alloc_list); + spin_unlock_irq(&tg_stats_alloc_lock); + if (!empty) + goto alloc_stats; +} + +static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) +{ + INIT_LIST_HEAD(&qn->node); + bio_list_init(&qn->bios); + qn->tg = tg; +} + +/** + * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it + * @bio: bio being added + * @qn: qnode to add bio to + * @queued: the service_queue->queued[] list @qn belongs to + * + * Add @bio to @qn and put @qn on @queued if it's not already on. + * @qn->tg's reference count is bumped when @qn is activated. See the + * comment on top of throtl_qnode definition for details. + */ +static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, + struct list_head *queued) +{ + bio_list_add(&qn->bios, bio); + if (list_empty(&qn->node)) { + list_add_tail(&qn->node, queued); + blkg_get(tg_to_blkg(qn->tg)); + } +} + +/** + * throtl_peek_queued - peek the first bio on a qnode list + * @queued: the qnode list to peek + */ +static struct bio *throtl_peek_queued(struct list_head *queued) +{ + struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); + struct bio *bio; + + if (list_empty(queued)) + return NULL; + + bio = bio_list_peek(&qn->bios); + WARN_ON_ONCE(!bio); + return bio; +} + +/** + * throtl_pop_queued - pop the first bio form a qnode list + * @queued: the qnode list to pop a bio from + * @tg_to_put: optional out argument for throtl_grp to put + * + * Pop the first bio from the qnode list @queued. After popping, the first + * qnode is removed from @queued if empty or moved to the end of @queued so + * that the popping order is round-robin. + * + * When the first qnode is removed, its associated throtl_grp should be put + * too. If @tg_to_put is NULL, this function automatically puts it; + * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is + * responsible for putting it. 
+ */ +static struct bio *throtl_pop_queued(struct list_head *queued, + struct throtl_grp **tg_to_put) +{ + struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); + struct bio *bio; + + if (list_empty(queued)) + return NULL; + + bio = bio_list_pop(&qn->bios); + WARN_ON_ONCE(!bio); + + if (bio_list_empty(&qn->bios)) { + list_del_init(&qn->node); + if (tg_to_put) + *tg_to_put = qn->tg; + else + blkg_put(tg_to_blkg(qn->tg)); + } else { + list_move_tail(&qn->node, queued); + } + + return bio; +} + +/* init a service_queue, assumes the caller zeroed it */ +static void throtl_service_queue_init(struct throtl_service_queue *sq, + struct throtl_service_queue *parent_sq) +{ + INIT_LIST_HEAD(&sq->queued[0]); + INIT_LIST_HEAD(&sq->queued[1]); + sq->pending_tree = RB_ROOT; + sq->parent_sq = parent_sq; + setup_timer(&sq->pending_timer, throtl_pending_timer_fn, + (unsigned long)sq); +} + +static void throtl_service_queue_exit(struct throtl_service_queue *sq) +{ + del_timer_sync(&sq->pending_timer); +} + +static void throtl_pd_init(struct blkcg_gq *blkg) +{ + struct throtl_grp *tg = blkg_to_tg(blkg); + struct throtl_data *td = blkg->q->td; + struct throtl_service_queue *parent_sq; + unsigned long flags; + int rw; + + /* + * If on the default hierarchy, we switch to properly hierarchical + * behavior where limits on a given throtl_grp are applied to the + * whole subtree rather than just the group itself. e.g. If 16M + * read_bps limit is set on the root group, the whole system can't + * exceed 16M for the device. + * + * If not on the default hierarchy, the broken flat hierarchy + * behavior is retained where all throtl_grps are treated as if + * they're all separate root groups right below throtl_data. + * Limits of a group don't interact with limits of other groups + * regardless of the position of the group in the hierarchy. + */ + parent_sq = &td->service_queue; + + if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent) + parent_sq = &blkg_to_tg(blkg->parent)->service_queue; + + throtl_service_queue_init(&tg->service_queue, parent_sq); + + for (rw = READ; rw <= WRITE; rw++) { + throtl_qnode_init(&tg->qnode_on_self[rw], tg); + throtl_qnode_init(&tg->qnode_on_parent[rw], tg); + } + + RB_CLEAR_NODE(&tg->rb_node); + tg->td = td; + + tg->bps[READ] = -1; + tg->bps[WRITE] = -1; + tg->iops[READ] = -1; + tg->iops[WRITE] = -1; + + /* + * Ugh... We need to perform per-cpu allocation for tg->stats_cpu + * but percpu allocator can't be called from IO path. Queue tg on + * tg_stats_alloc_list and allocate from work item. + */ + spin_lock_irqsave(&tg_stats_alloc_lock, flags); + list_add(&tg->stats_alloc_node, &tg_stats_alloc_list); + schedule_delayed_work(&tg_stats_alloc_work, 0); + spin_unlock_irqrestore(&tg_stats_alloc_lock, flags); +} + +/* + * Set has_rules[] if @tg or any of its parents have limits configured. + * This doesn't require walking up to the top of the hierarchy as the + * parent's has_rules[] is guaranteed to be correct. + */ +static void tg_update_has_rules(struct throtl_grp *tg) +{ + struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); + int rw; + + for (rw = READ; rw <= WRITE; rw++) + tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || + (tg->bps[rw] != -1 || tg->iops[rw] != -1); +} + +static void throtl_pd_online(struct blkcg_gq *blkg) +{ + /* + * We don't want new groups to escape the limits of its ancestors. + * Update has_rules[] after a new group is brought online. 
+ */ + tg_update_has_rules(blkg_to_tg(blkg)); +} + +static void throtl_pd_exit(struct blkcg_gq *blkg) +{ + struct throtl_grp *tg = blkg_to_tg(blkg); + unsigned long flags; + + spin_lock_irqsave(&tg_stats_alloc_lock, flags); + list_del_init(&tg->stats_alloc_node); + spin_unlock_irqrestore(&tg_stats_alloc_lock, flags); + + free_percpu(tg->stats_cpu); + + throtl_service_queue_exit(&tg->service_queue); +} + +static void throtl_pd_reset_stats(struct blkcg_gq *blkg) +{ + struct throtl_grp *tg = blkg_to_tg(blkg); + int cpu; + + if (tg->stats_cpu == NULL) + return; + + for_each_possible_cpu(cpu) { + struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); + + blkg_rwstat_reset(&sc->service_bytes); + blkg_rwstat_reset(&sc->serviced); + } +} + +static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td, + struct blkcg *blkcg) +{ + /* + * This is the common case when there are no blkcgs. Avoid lookup + * in this case + */ + if (blkcg == &blkcg_root) + return td_root_tg(td); + + return blkg_to_tg(blkg_lookup(blkcg, td->queue)); +} + +static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td, + struct blkcg *blkcg) +{ + struct request_queue *q = td->queue; + struct throtl_grp *tg = NULL; + + /* + * This is the common case when there are no blkcgs. Avoid lookup + * in this case + */ + if (blkcg == &blkcg_root) { + tg = td_root_tg(td); + } else { + struct blkcg_gq *blkg; + + blkg = blkg_lookup_create(blkcg, q); + + /* if %NULL and @q is alive, fall back to root_tg */ + if (!IS_ERR(blkg)) + tg = blkg_to_tg(blkg); + else if (!blk_queue_dying(q)) + tg = td_root_tg(td); + } + + return tg; +} + +static struct throtl_grp * +throtl_rb_first(struct throtl_service_queue *parent_sq) +{ + /* Service tree is empty */ + if (!parent_sq->nr_pending) + return NULL; + + if (!parent_sq->first_pending) + parent_sq->first_pending = rb_first(&parent_sq->pending_tree); + + if (parent_sq->first_pending) + return rb_entry_tg(parent_sq->first_pending); + + return NULL; +} + +static void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} + +static void throtl_rb_erase(struct rb_node *n, + struct throtl_service_queue *parent_sq) +{ + if (parent_sq->first_pending == n) + parent_sq->first_pending = NULL; + rb_erase_init(n, &parent_sq->pending_tree); + --parent_sq->nr_pending; +} + +static void update_min_dispatch_time(struct throtl_service_queue *parent_sq) +{ + struct throtl_grp *tg; + + tg = throtl_rb_first(parent_sq); + if (!tg) + return; + + parent_sq->first_pending_disptime = tg->disptime; +} + +static void tg_service_queue_add(struct throtl_grp *tg) +{ + struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; + struct rb_node **node = &parent_sq->pending_tree.rb_node; + struct rb_node *parent = NULL; + struct throtl_grp *__tg; + unsigned long key = tg->disptime; + int left = 1; + + while (*node != NULL) { + parent = *node; + __tg = rb_entry_tg(parent); + + if (time_before(key, __tg->disptime)) + node = &parent->rb_left; + else { + node = &parent->rb_right; + left = 0; + } + } + + if (left) + parent_sq->first_pending = &tg->rb_node; + + rb_link_node(&tg->rb_node, parent, node); + rb_insert_color(&tg->rb_node, &parent_sq->pending_tree); +} + +static void __throtl_enqueue_tg(struct throtl_grp *tg) +{ + tg_service_queue_add(tg); + tg->flags |= THROTL_TG_PENDING; + tg->service_queue.parent_sq->nr_pending++; +} + +static void throtl_enqueue_tg(struct throtl_grp *tg) +{ + if (!(tg->flags & THROTL_TG_PENDING)) + __throtl_enqueue_tg(tg); +} + 
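The bps/iops limits these groups enforce are configured through the cgroup interface; on the legacy (v1) blkio hierarchy, setting a 16 MB/s read limit for one group typically looks like the sketch below. The mount point, group name and the 8:0 device numbers are assumptions:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/blkio/mygrp/"
			"blkio.throttle.read_bps_device", "w");

	if (!f)
		return 1;

	/* format: "MAJOR:MINOR bytes-per-second" */
	fprintf(f, "8:0 16777216\n");
	return fclose(f) ? 1 : 0;
}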
+static void __throtl_dequeue_tg(struct throtl_grp *tg) +{ + throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); + tg->flags &= ~THROTL_TG_PENDING; +} + +static void throtl_dequeue_tg(struct throtl_grp *tg) +{ + if (tg->flags & THROTL_TG_PENDING) + __throtl_dequeue_tg(tg); +} + +/* Call with queue lock held */ +static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, + unsigned long expires) +{ + mod_timer(&sq->pending_timer, expires); + throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu", + expires - jiffies, jiffies); +} + +/** + * throtl_schedule_next_dispatch - schedule the next dispatch cycle + * @sq: the service_queue to schedule dispatch for + * @force: force scheduling + * + * Arm @sq->pending_timer so that the next dispatch cycle starts on the + * dispatch time of the first pending child. Returns %true if either timer + * is armed or there's no pending child left. %false if the current + * dispatch window is still open and the caller should continue + * dispatching. + * + * If @force is %true, the dispatch timer is always scheduled and this + * function is guaranteed to return %true. This is to be used when the + * caller can't dispatch itself and needs to invoke pending_timer + * unconditionally. Note that forced scheduling is likely to induce short + * delay before dispatch starts even if @sq->first_pending_disptime is not + * in the future and thus shouldn't be used in hot paths. + */ +static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq, + bool force) +{ + /* any pending children left? */ + if (!sq->nr_pending) + return true; + + update_min_dispatch_time(sq); + + /* is the next dispatch time in the future? */ + if (force || time_after(sq->first_pending_disptime, jiffies)) { + throtl_schedule_pending_timer(sq, sq->first_pending_disptime); + return true; + } + + /* tell the caller to continue dispatching */ + return false; +} + +static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, + bool rw, unsigned long start) +{ + tg->bytes_disp[rw] = 0; + tg->io_disp[rw] = 0; + + /* + * Previous slice has expired. We must have trimmed it after last + * bio dispatch. That means since start of last slice, we never used + * that bandwidth. Do try to make use of that bandwidth while giving + * credit. + */ + if (time_after_eq(start, tg->slice_start[rw])) + tg->slice_start[rw] = start; + + tg->slice_end[rw] = jiffies + throtl_slice; + throtl_log(&tg->service_queue, + "[%c] new slice with credit start=%lu end=%lu jiffies=%lu", + rw == READ ? 'R' : 'W', tg->slice_start[rw], + tg->slice_end[rw], jiffies); +} + +static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) +{ + tg->bytes_disp[rw] = 0; + tg->io_disp[rw] = 0; + tg->slice_start[rw] = jiffies; + tg->slice_end[rw] = jiffies + throtl_slice; + throtl_log(&tg->service_queue, + "[%c] new slice start=%lu end=%lu jiffies=%lu", + rw == READ ? 'R' : 'W', tg->slice_start[rw], + tg->slice_end[rw], jiffies); +} + +static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, + unsigned long jiffy_end) +{ + tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); +} + +static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, + unsigned long jiffy_end) +{ + tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); + throtl_log(&tg->service_queue, + "[%c] extend slice start=%lu end=%lu jiffies=%lu", + rw == READ ? 
'R' : 'W', tg->slice_start[rw], + tg->slice_end[rw], jiffies); +} + +/* Determine if previously allocated or extended slice is complete or not */ +static bool throtl_slice_used(struct throtl_grp *tg, bool rw) +{ + if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) + return false; + + return 1; +} + +/* Trim the used slices and adjust slice start accordingly */ +static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) +{ + unsigned long nr_slices, time_elapsed, io_trim; + u64 bytes_trim, tmp; + + BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); + + /* + * If bps are unlimited (-1), then time slice don't get + * renewed. Don't try to trim the slice if slice is used. A new + * slice will start when appropriate. + */ + if (throtl_slice_used(tg, rw)) + return; + + /* + * A bio has been dispatched. Also adjust slice_end. It might happen + * that initially cgroup limit was very low resulting in high + * slice_end, but later limit was bumped up and bio was dispached + * sooner, then we need to reduce slice_end. A high bogus slice_end + * is bad because it does not allow new slice to start. + */ + + throtl_set_slice_end(tg, rw, jiffies + throtl_slice); + + time_elapsed = jiffies - tg->slice_start[rw]; + + nr_slices = time_elapsed / throtl_slice; + + if (!nr_slices) + return; + tmp = tg->bps[rw] * throtl_slice * nr_slices; + do_div(tmp, HZ); + bytes_trim = tmp; + + io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ; + + if (!bytes_trim && !io_trim) + return; + + if (tg->bytes_disp[rw] >= bytes_trim) + tg->bytes_disp[rw] -= bytes_trim; + else + tg->bytes_disp[rw] = 0; + + if (tg->io_disp[rw] >= io_trim) + tg->io_disp[rw] -= io_trim; + else + tg->io_disp[rw] = 0; + + tg->slice_start[rw] += nr_slices * throtl_slice; + + throtl_log(&tg->service_queue, + "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu", + rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim, + tg->slice_start[rw], tg->slice_end[rw], jiffies); +} + +static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, + unsigned long *wait) +{ + bool rw = bio_data_dir(bio); + unsigned int io_allowed; + unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; + u64 tmp; + + jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; + + /* Slice has just started. Consider one slice interval */ + if (!jiffy_elapsed) + jiffy_elapsed_rnd = throtl_slice; + + jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice); + + /* + * jiffy_elapsed_rnd should not be a big value as minimum iops can be + * 1 then at max jiffy elapsed should be equivalent of 1 second as we + * will allow dispatch after 1 second and after that slice should + * have been trimmed. 
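The trimming arithmetic in throtl_trim_slice() is easiest to see with concrete numbers; a small standalone sketch assuming HZ=1000, the 100-jiffy throtl_slice defined above, a 1 MiB/s limit, 100 iops and 300 ms of elapsed slice time:

#include <stdio.h>

int main(void)
{
	unsigned long hz = 1000, throtl_slice = hz / 10;	/* 100 jiffies */
	unsigned long long bps = 1 << 20;			/* 1 MiB/s */
	unsigned int iops = 100;
	unsigned long nr_slices = 3;				/* 300 ms elapsed */

	unsigned long long bytes_trim = bps * throtl_slice * nr_slices / hz;
	unsigned long io_trim = iops * throtl_slice * nr_slices / hz;

	/* 1 MiB/s over 0.3 s -> 314572 bytes; 100 iops over 0.3 s -> 30 ios */
	printf("bytes_trim=%llu io_trim=%lu\n", bytes_trim, io_trim);
	return 0;
}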
+ */ + + tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd; + do_div(tmp, HZ); + + if (tmp > UINT_MAX) + io_allowed = UINT_MAX; + else + io_allowed = tmp; + + if (tg->io_disp[rw] + 1 <= io_allowed) { + if (wait) + *wait = 0; + return true; + } + + /* Calc approx time to dispatch */ + jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1; + + if (jiffy_wait > jiffy_elapsed) + jiffy_wait = jiffy_wait - jiffy_elapsed; + else + jiffy_wait = 1; + + if (wait) + *wait = jiffy_wait; + return 0; +} + +static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, + unsigned long *wait) +{ + bool rw = bio_data_dir(bio); + u64 bytes_allowed, extra_bytes, tmp; + unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; + + jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; + + /* Slice has just started. Consider one slice interval */ + if (!jiffy_elapsed) + jiffy_elapsed_rnd = throtl_slice; + + jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice); + + tmp = tg->bps[rw] * jiffy_elapsed_rnd; + do_div(tmp, HZ); + bytes_allowed = tmp; + + if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { + if (wait) + *wait = 0; + return true; + } + + /* Calc approx time to dispatch */ + extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; + jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); + + if (!jiffy_wait) + jiffy_wait = 1; + + /* + * This wait time is without taking into consideration the rounding + * up we did. Add that time also. + */ + jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed); + if (wait) + *wait = jiffy_wait; + return 0; +} + +/* + * Returns whether one can dispatch a bio or not. Also returns approx number + * of jiffies to wait before this bio is with-in IO rate and can be dispatched + */ +static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, + unsigned long *wait) +{ + bool rw = bio_data_dir(bio); + unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0; + + /* + * Currently whole state machine of group depends on first bio + * queued in the group bio list. So one should not be calling + * this function with a different bio if there are other bios + * queued. + */ + BUG_ON(tg->service_queue.nr_queued[rw] && + bio != throtl_peek_queued(&tg->service_queue.queued[rw])); + + /* If tg->bps = -1, then BW is unlimited */ + if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { + if (wait) + *wait = 0; + return true; + } + + /* + * If previous slice expired, start a new one otherwise renew/extend + * existing slice to make sure it is at least throtl_slice interval + * long since now. + */ + if (throtl_slice_used(tg, rw)) + throtl_start_new_slice(tg, rw); + else { + if (time_before(tg->slice_end[rw], jiffies + throtl_slice)) + throtl_extend_slice(tg, rw, jiffies + throtl_slice); + } + + if (tg_with_in_bps_limit(tg, bio, &bps_wait) && + tg_with_in_iops_limit(tg, bio, &iops_wait)) { + if (wait) + *wait = 0; + return 1; + } + + max_wait = max(bps_wait, iops_wait); + + if (wait) + *wait = max_wait; + + if (time_before(tg->slice_end[rw], jiffies + max_wait)) + throtl_extend_slice(tg, rw, jiffies + max_wait); + + return 0; +} + +static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes, + int rw) +{ + struct throtl_grp *tg = blkg_to_tg(blkg); + struct tg_stats_cpu *stats_cpu; + unsigned long flags; + + /* If per cpu stats are not allocated yet, don't do any accounting. */ + if (tg->stats_cpu == NULL) + return; + + /* + * Disabling interrupts to provide mutual exclusion between two + * writes on same cpu. 
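The bytes_allowed/extra_bytes computation in tg_with_in_bps_limit() above is essentially a rate budget over the current slice. A standalone userspace sketch of that check follows; HZ and the sample numbers are assumptions for illustration, not values from the patch.

/* sketch of "can this bio go now, or how long should it wait" -- illustration only */
#include <stdio.h>

#define HZ 1000ULL

int main(void)
{
	unsigned long long bps = 512 * 1024;        /* 512 KiB/s limit */
	unsigned long long jiffies_in_slice = 40;   /* rounded elapsed time in the slice */
	unsigned long long bytes_disp = 30 * 1024;  /* already dispatched this slice */
	unsigned long long bio_size = 64 * 1024;

	unsigned long long bytes_allowed = bps * jiffies_in_slice / HZ;

	if (bytes_disp + bio_size <= bytes_allowed) {
		printf("within limit, dispatch immediately\n");
	} else {
		unsigned long long extra = bytes_disp + bio_size - bytes_allowed;
		unsigned long long wait = extra * HZ / bps;

		if (!wait)
			wait = 1;
		printf("over budget by %llu bytes, wait ~%llu jiffies\n", extra, wait);
	}
	return 0;
}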
It probably is not needed for 64bit. Not + * optimizing that case yet. + */ + local_irq_save(flags); + + stats_cpu = this_cpu_ptr(tg->stats_cpu); + + blkg_rwstat_add(&stats_cpu->serviced, rw, 1); + blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes); + + local_irq_restore(flags); +} + +static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) +{ + bool rw = bio_data_dir(bio); + + /* Charge the bio to the group */ + tg->bytes_disp[rw] += bio->bi_iter.bi_size; + tg->io_disp[rw]++; + + /* + * REQ_THROTTLED is used to prevent the same bio to be throttled + * more than once as a throttled bio will go through blk-throtl the + * second time when it eventually gets issued. Set it when a bio + * is being charged to a tg. + * + * Dispatch stats aren't recursive and each @bio should only be + * accounted by the @tg it was originally associated with. Let's + * update the stats when setting REQ_THROTTLED for the first time + * which is guaranteed to be for the @bio's original tg. + */ + if (!(bio->bi_rw & REQ_THROTTLED)) { + bio->bi_rw |= REQ_THROTTLED; + throtl_update_dispatch_stats(tg_to_blkg(tg), + bio->bi_iter.bi_size, bio->bi_rw); + } +} + +/** + * throtl_add_bio_tg - add a bio to the specified throtl_grp + * @bio: bio to add + * @qn: qnode to use + * @tg: the target throtl_grp + * + * Add @bio to @tg's service_queue using @qn. If @qn is not specified, + * tg->qnode_on_self[] is used. + */ +static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, + struct throtl_grp *tg) +{ + struct throtl_service_queue *sq = &tg->service_queue; + bool rw = bio_data_dir(bio); + + if (!qn) + qn = &tg->qnode_on_self[rw]; + + /* + * If @tg doesn't currently have any bios queued in the same + * direction, queueing @bio can change when @tg should be + * dispatched. Mark that @tg was empty. This is automatically + * cleaered on the next tg_update_disptime(). + */ + if (!sq->nr_queued[rw]) + tg->flags |= THROTL_TG_WAS_EMPTY; + + throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); + + sq->nr_queued[rw]++; + throtl_enqueue_tg(tg); +} + +static void tg_update_disptime(struct throtl_grp *tg) +{ + struct throtl_service_queue *sq = &tg->service_queue; + unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; + struct bio *bio; + + if ((bio = throtl_peek_queued(&sq->queued[READ]))) + tg_may_dispatch(tg, bio, &read_wait); + + if ((bio = throtl_peek_queued(&sq->queued[WRITE]))) + tg_may_dispatch(tg, bio, &write_wait); + + min_wait = min(read_wait, write_wait); + disptime = jiffies + min_wait; + + /* Update dispatch time */ + throtl_dequeue_tg(tg); + tg->disptime = disptime; + throtl_enqueue_tg(tg); + + /* see throtl_add_bio_tg() */ + tg->flags &= ~THROTL_TG_WAS_EMPTY; +} + +static void start_parent_slice_with_credit(struct throtl_grp *child_tg, + struct throtl_grp *parent_tg, bool rw) +{ + if (throtl_slice_used(parent_tg, rw)) { + throtl_start_new_slice_with_credit(parent_tg, rw, + child_tg->slice_start[rw]); + } + +} + +static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) +{ + struct throtl_service_queue *sq = &tg->service_queue; + struct throtl_service_queue *parent_sq = sq->parent_sq; + struct throtl_grp *parent_tg = sq_to_tg(parent_sq); + struct throtl_grp *tg_to_put = NULL; + struct bio *bio; + + /* + * @bio is being transferred from @tg to @parent_sq. Popping a bio + * from @tg may put its reference and @parent_sq might end up + * getting released prematurely. Remember the tg to put and put it + * after @bio is transferred to @parent_sq. 
+ */ + bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); + sq->nr_queued[rw]--; + + throtl_charge_bio(tg, bio); + + /* + * If our parent is another tg, we just need to transfer @bio to + * the parent using throtl_add_bio_tg(). If our parent is + * @td->service_queue, @bio is ready to be issued. Put it on its + * bio_lists[] and decrease total number queued. The caller is + * responsible for issuing these bios. + */ + if (parent_tg) { + throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); + start_parent_slice_with_credit(tg, parent_tg, rw); + } else { + throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], + &parent_sq->queued[rw]); + BUG_ON(tg->td->nr_queued[rw] <= 0); + tg->td->nr_queued[rw]--; + } + + throtl_trim_slice(tg, rw); + + if (tg_to_put) + blkg_put(tg_to_blkg(tg_to_put)); +} + +static int throtl_dispatch_tg(struct throtl_grp *tg) +{ + struct throtl_service_queue *sq = &tg->service_queue; + unsigned int nr_reads = 0, nr_writes = 0; + unsigned int max_nr_reads = throtl_grp_quantum*3/4; + unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads; + struct bio *bio; + + /* Try to dispatch 75% READS and 25% WRITES */ + + while ((bio = throtl_peek_queued(&sq->queued[READ])) && + tg_may_dispatch(tg, bio, NULL)) { + + tg_dispatch_one_bio(tg, bio_data_dir(bio)); + nr_reads++; + + if (nr_reads >= max_nr_reads) + break; + } + + while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && + tg_may_dispatch(tg, bio, NULL)) { + + tg_dispatch_one_bio(tg, bio_data_dir(bio)); + nr_writes++; + + if (nr_writes >= max_nr_writes) + break; + } + + return nr_reads + nr_writes; +} + +static int throtl_select_dispatch(struct throtl_service_queue *parent_sq) +{ + unsigned int nr_disp = 0; + + while (1) { + struct throtl_grp *tg = throtl_rb_first(parent_sq); + struct throtl_service_queue *sq = &tg->service_queue; + + if (!tg) + break; + + if (time_before(jiffies, tg->disptime)) + break; + + throtl_dequeue_tg(tg); + + nr_disp += throtl_dispatch_tg(tg); + + if (sq->nr_queued[0] || sq->nr_queued[1]) + tg_update_disptime(tg); + + if (nr_disp >= throtl_quantum) + break; + } + + return nr_disp; +} + +/** + * throtl_pending_timer_fn - timer function for service_queue->pending_timer + * @arg: the throtl_service_queue being serviced + * + * This timer is armed when a child throtl_grp with active bio's become + * pending and queued on the service_queue's pending_tree and expires when + * the first child throtl_grp should be dispatched. This function + * dispatches bio's from the children throtl_grps to the parent + * service_queue. + * + * If the parent's parent is another throtl_grp, dispatching is propagated + * by either arming its pending_timer or repeating dispatch directly. If + * the top-level service_tree is reached, throtl_data->dispatch_work is + * kicked so that the ready bio's are issued. 
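For reference, the read/write split in throtl_dispatch_tg() above works from a small per-group budget. The sketch below illustrates that split; the quantum value is an assumption for the example (the real constants are defined earlier in the file).

/* sketch of the per-round dispatch budget split -- illustration only */
#include <stdio.h>

int main(void)
{
	unsigned int grp_quantum = 8;                        /* assumed bios per group per round */
	unsigned int max_reads   = grp_quantum * 3 / 4;      /* reads get ~75% of the budget */
	unsigned int max_writes  = grp_quantum - max_reads;  /* writes get the remainder */

	unsigned int queued_reads = 10, queued_writes = 5;
	unsigned int disp_reads  = queued_reads  < max_reads  ? queued_reads  : max_reads;
	unsigned int disp_writes = queued_writes < max_writes ? queued_writes : max_writes;

	printf("this round: %u of %u reads, %u of %u writes\n",
	       disp_reads, queued_reads, disp_writes, queued_writes);
	return 0;
}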
+ */ +static void throtl_pending_timer_fn(unsigned long arg) +{ + struct throtl_service_queue *sq = (void *)arg; + struct throtl_grp *tg = sq_to_tg(sq); + struct throtl_data *td = sq_to_td(sq); + struct request_queue *q = td->queue; + struct throtl_service_queue *parent_sq; + bool dispatched; + int ret; + + spin_lock_irq(q->queue_lock); +again: + parent_sq = sq->parent_sq; + dispatched = false; + + while (true) { + throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u", + sq->nr_queued[READ] + sq->nr_queued[WRITE], + sq->nr_queued[READ], sq->nr_queued[WRITE]); + + ret = throtl_select_dispatch(sq); + if (ret) { + throtl_log(sq, "bios disp=%u", ret); + dispatched = true; + } + + if (throtl_schedule_next_dispatch(sq, false)) + break; + + /* this dispatch windows is still open, relax and repeat */ + spin_unlock_irq(q->queue_lock); + cpu_relax(); + spin_lock_irq(q->queue_lock); + } + + if (!dispatched) + goto out_unlock; + + if (parent_sq) { + /* @parent_sq is another throl_grp, propagate dispatch */ + if (tg->flags & THROTL_TG_WAS_EMPTY) { + tg_update_disptime(tg); + if (!throtl_schedule_next_dispatch(parent_sq, false)) { + /* window is already open, repeat dispatching */ + sq = parent_sq; + tg = sq_to_tg(sq); + goto again; + } + } + } else { + /* reached the top-level, queue issueing */ + queue_work(kthrotld_workqueue, &td->dispatch_work); + } +out_unlock: + spin_unlock_irq(q->queue_lock); +} + +/** + * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work + * @work: work item being executed + * + * This function is queued for execution when bio's reach the bio_lists[] + * of throtl_data->service_queue. Those bio's are ready and issued by this + * function. + */ +static void blk_throtl_dispatch_work_fn(struct work_struct *work) +{ + struct throtl_data *td = container_of(work, struct throtl_data, + dispatch_work); + struct throtl_service_queue *td_sq = &td->service_queue; + struct request_queue *q = td->queue; + struct bio_list bio_list_on_stack; + struct bio *bio; + struct blk_plug plug; + int rw; + + bio_list_init(&bio_list_on_stack); + + spin_lock_irq(q->queue_lock); + for (rw = READ; rw <= WRITE; rw++) + while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) + bio_list_add(&bio_list_on_stack, bio); + spin_unlock_irq(q->queue_lock); + + if (!bio_list_empty(&bio_list_on_stack)) { + blk_start_plug(&plug); + while((bio = bio_list_pop(&bio_list_on_stack))) + generic_make_request(bio); + blk_finish_plug(&plug); + } +} + +static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, + struct blkg_policy_data *pd, int off) +{ + struct throtl_grp *tg = pd_to_tg(pd); + struct blkg_rwstat rwstat = { }, tmp; + int i, cpu; + + if (tg->stats_cpu == NULL) + return 0; + + for_each_possible_cpu(cpu) { + struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); + + tmp = blkg_rwstat_read((void *)sc + off); + for (i = 0; i < BLKG_RWSTAT_NR; i++) + rwstat.cnt[i] += tmp.cnt[i]; + } + + return __blkg_prfill_rwstat(sf, pd, &rwstat); +} + +static int tg_print_cpu_rwstat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat, + &blkcg_policy_throtl, seq_cft(sf)->private, true); + return 0; +} + +static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct throtl_grp *tg = pd_to_tg(pd); + u64 v = *(u64 *)((void *)tg + off); + + if (v == -1) + return 0; + return __blkg_prfill_u64(sf, pd, v); +} + +static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct 
throtl_grp *tg = pd_to_tg(pd); + unsigned int v = *(unsigned int *)((void *)tg + off); + + if (v == -1) + return 0; + return __blkg_prfill_u64(sf, pd, v); +} + +static int tg_print_conf_u64(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64, + &blkcg_policy_throtl, seq_cft(sf)->private, false); + return 0; +} + +static int tg_print_conf_uint(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint, + &blkcg_policy_throtl, seq_cft(sf)->private, false); + return 0; +} + +static ssize_t tg_set_conf(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off, bool is_u64) +{ + struct blkcg *blkcg = css_to_blkcg(of_css(of)); + struct blkg_conf_ctx ctx; + struct throtl_grp *tg; + struct throtl_service_queue *sq; + struct blkcg_gq *blkg; + struct cgroup_subsys_state *pos_css; + int ret; + + ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); + if (ret) + return ret; + + tg = blkg_to_tg(ctx.blkg); + sq = &tg->service_queue; + + if (!ctx.v) + ctx.v = -1; + + if (is_u64) + *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v; + else + *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v; + + throtl_log(&tg->service_queue, + "limit change rbps=%llu wbps=%llu riops=%u wiops=%u", + tg->bps[READ], tg->bps[WRITE], + tg->iops[READ], tg->iops[WRITE]); + + /* + * Update has_rules[] flags for the updated tg's subtree. A tg is + * considered to have rules if either the tg itself or any of its + * ancestors has rules. This identifies groups without any + * restrictions in the whole hierarchy and allows them to bypass + * blk-throttle. + */ + blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg) + tg_update_has_rules(blkg_to_tg(blkg)); + + /* + * We're already holding queue_lock and know @tg is valid. Let's + * apply the new config directly. + * + * Restart the slices for both READ and WRITES. It might happen + * that a group's limit are dropped suddenly and we don't want to + * account recently dispatched IO with new low rate. 
+ */ + throtl_start_new_slice(tg, 0); + throtl_start_new_slice(tg, 1); + + if (tg->flags & THROTL_TG_PENDING) { + tg_update_disptime(tg); + throtl_schedule_next_dispatch(sq->parent_sq, true); + } + + blkg_conf_finish(&ctx); + return nbytes; +} + +static ssize_t tg_set_conf_u64(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + return tg_set_conf(of, buf, nbytes, off, true); +} + +static ssize_t tg_set_conf_uint(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + return tg_set_conf(of, buf, nbytes, off, false); +} + +static struct cftype throtl_files[] = { + { + .name = "throttle.read_bps_device", + .private = offsetof(struct throtl_grp, bps[READ]), + .seq_show = tg_print_conf_u64, + .write = tg_set_conf_u64, + }, + { + .name = "throttle.write_bps_device", + .private = offsetof(struct throtl_grp, bps[WRITE]), + .seq_show = tg_print_conf_u64, + .write = tg_set_conf_u64, + }, + { + .name = "throttle.read_iops_device", + .private = offsetof(struct throtl_grp, iops[READ]), + .seq_show = tg_print_conf_uint, + .write = tg_set_conf_uint, + }, + { + .name = "throttle.write_iops_device", + .private = offsetof(struct throtl_grp, iops[WRITE]), + .seq_show = tg_print_conf_uint, + .write = tg_set_conf_uint, + }, + { + .name = "throttle.io_service_bytes", + .private = offsetof(struct tg_stats_cpu, service_bytes), + .seq_show = tg_print_cpu_rwstat, + }, + { + .name = "throttle.io_serviced", + .private = offsetof(struct tg_stats_cpu, serviced), + .seq_show = tg_print_cpu_rwstat, + }, + { } /* terminate */ +}; + +static void throtl_shutdown_wq(struct request_queue *q) +{ + struct throtl_data *td = q->td; + + cancel_work_sync(&td->dispatch_work); +} + +static struct blkcg_policy blkcg_policy_throtl = { + .pd_size = sizeof(struct throtl_grp), + .cftypes = throtl_files, + + .pd_init_fn = throtl_pd_init, + .pd_online_fn = throtl_pd_online, + .pd_exit_fn = throtl_pd_exit, + .pd_reset_stats_fn = throtl_pd_reset_stats, +}; + +bool blk_throtl_bio(struct request_queue *q, struct bio *bio) +{ + struct throtl_data *td = q->td; + struct throtl_qnode *qn = NULL; + struct throtl_grp *tg; + struct throtl_service_queue *sq; + bool rw = bio_data_dir(bio); + struct blkcg *blkcg; + bool throttled = false; + + /* see throtl_charge_bio() */ + if (bio->bi_rw & REQ_THROTTLED) + goto out; + + /* + * A throtl_grp pointer retrieved under rcu can be used to access + * basic fields like stats and io rates. If a group has no rules, + * just update the dispatch stats in lockless manner and return. + */ + rcu_read_lock(); + blkcg = bio_blkcg(bio); + tg = throtl_lookup_tg(td, blkcg); + if (tg) { + if (!tg->has_rules[rw]) { + throtl_update_dispatch_stats(tg_to_blkg(tg), + bio->bi_iter.bi_size, bio->bi_rw); + goto out_unlock_rcu; + } + } + + /* + * Either group has not been allocated yet or it is not an unlimited + * IO group + */ + spin_lock_irq(q->queue_lock); + tg = throtl_lookup_create_tg(td, blkcg); + if (unlikely(!tg)) + goto out_unlock; + + sq = &tg->service_queue; + + while (true) { + /* throtl is FIFO - if bios are already queued, should queue */ + if (sq->nr_queued[rw]) + break; + + /* if above limits, break to queue */ + if (!tg_may_dispatch(tg, bio, NULL)) + break; + + /* within limits, let's charge and dispatch directly */ + throtl_charge_bio(tg, bio); + + /* + * We need to trim slice even when bios are not being queued + * otherwise it might happen that a bio is not queued for + * a long time and slice keeps on extending and trim is not + * called for a long time. 
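An aside on the throtl_files[] table above: each entry's .private carries a field offset so that one pair of print/store helpers can service all four limit files. A minimal standalone sketch of that offsetof() pattern follows; the struct layout and values are invented for illustration.

/* sketch of the offsetof()-driven generic accessor used by the cftype table */
#include <stdio.h>
#include <stddef.h>

struct grp {
	unsigned long long bps[2];   /* [0] = READ, [1] = WRITE */
	unsigned int iops[2];
};

/* generic u64 reader: "object + offset", as in tg_prfill_conf_u64() */
static unsigned long long read_u64(struct grp *g, size_t off)
{
	return *(unsigned long long *)((char *)g + off);
}

int main(void)
{
	struct grp g = { .bps = { 1048576, 2097152 }, .iops = { 100, 200 } };

	printf("read_bps=%llu write_bps=%llu\n",
	       read_u64(&g, offsetof(struct grp, bps[0])),
	       read_u64(&g, offsetof(struct grp, bps[1])));
	return 0;
}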
Now if limits are reduced suddenly + * we take into account all the IO dispatched so far at new + * low rate and * newly queued IO gets a really long dispatch + * time. + * + * So keep on trimming slice even if bio is not queued. + */ + throtl_trim_slice(tg, rw); + + /* + * @bio passed through this layer without being throttled. + * Climb up the ladder. If we''re already at the top, it + * can be executed directly. + */ + qn = &tg->qnode_on_parent[rw]; + sq = sq->parent_sq; + tg = sq_to_tg(sq); + if (!tg) + goto out_unlock; + } + + /* out-of-limit, queue to @tg */ + throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", + rw == READ ? 'R' : 'W', + tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], + tg->io_disp[rw], tg->iops[rw], + sq->nr_queued[READ], sq->nr_queued[WRITE]); + + bio_associate_current(bio); + tg->td->nr_queued[rw]++; + throtl_add_bio_tg(bio, qn, tg); + throttled = true; + + /* + * Update @tg's dispatch time and force schedule dispatch if @tg + * was empty before @bio. The forced scheduling isn't likely to + * cause undue delay as @bio is likely to be dispatched directly if + * its @tg's disptime is not in the future. + */ + if (tg->flags & THROTL_TG_WAS_EMPTY) { + tg_update_disptime(tg); + throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); + } + +out_unlock: + spin_unlock_irq(q->queue_lock); +out_unlock_rcu: + rcu_read_unlock(); +out: + /* + * As multiple blk-throtls may stack in the same issue path, we + * don't want bios to leave with the flag set. Clear the flag if + * being issued. + */ + if (!throttled) + bio->bi_rw &= ~REQ_THROTTLED; + return throttled; +} + +/* + * Dispatch all bios from all children tg's queued on @parent_sq. On + * return, @parent_sq is guaranteed to not have any active children tg's + * and all bios from previously active tg's are on @parent_sq->bio_lists[]. + */ +static void tg_drain_bios(struct throtl_service_queue *parent_sq) +{ + struct throtl_grp *tg; + + while ((tg = throtl_rb_first(parent_sq))) { + struct throtl_service_queue *sq = &tg->service_queue; + struct bio *bio; + + throtl_dequeue_tg(tg); + + while ((bio = throtl_peek_queued(&sq->queued[READ]))) + tg_dispatch_one_bio(tg, bio_data_dir(bio)); + while ((bio = throtl_peek_queued(&sq->queued[WRITE]))) + tg_dispatch_one_bio(tg, bio_data_dir(bio)); + } +} + +/** + * blk_throtl_drain - drain throttled bios + * @q: request_queue to drain throttled bios for + * + * Dispatch all currently throttled bios on @q through ->make_request_fn(). + */ +void blk_throtl_drain(struct request_queue *q) + __releases(q->queue_lock) __acquires(q->queue_lock) +{ + struct throtl_data *td = q->td; + struct blkcg_gq *blkg; + struct cgroup_subsys_state *pos_css; + struct bio *bio; + int rw; + + queue_lockdep_assert_held(q); + rcu_read_lock(); + + /* + * Drain each tg while doing post-order walk on the blkg tree, so + * that all bios are propagated to td->service_queue. It'd be + * better to walk service_queue tree directly but blkg walk is + * easier. 
+ */ + blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) + tg_drain_bios(&blkg_to_tg(blkg)->service_queue); + + /* finally, transfer bios from top-level tg's into the td */ + tg_drain_bios(&td->service_queue); + + rcu_read_unlock(); + spin_unlock_irq(q->queue_lock); + + /* all bios now should be in td->service_queue, issue them */ + for (rw = READ; rw <= WRITE; rw++) + while ((bio = throtl_pop_queued(&td->service_queue.queued[rw], + NULL))) + generic_make_request(bio); + + spin_lock_irq(q->queue_lock); +} + +int blk_throtl_init(struct request_queue *q) +{ + struct throtl_data *td; + int ret; + + td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); + if (!td) + return -ENOMEM; + + INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); + throtl_service_queue_init(&td->service_queue, NULL); + + q->td = td; + td->queue = q; + + /* activate policy */ + ret = blkcg_activate_policy(q, &blkcg_policy_throtl); + if (ret) + kfree(td); + return ret; +} + +void blk_throtl_exit(struct request_queue *q) +{ + BUG_ON(!q->td); + throtl_shutdown_wq(q); + blkcg_deactivate_policy(q, &blkcg_policy_throtl); + kfree(q->td); +} + +static int __init throtl_init(void) +{ + kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); + if (!kthrotld_workqueue) + panic("Failed to create kthrotld\n"); + + return blkcg_policy_register(&blkcg_policy_throtl); +} + +module_init(throtl_init); diff --git a/block/blk-timeout.c b/block/blk-timeout.c new file mode 100644 index 000000000..246dfb16c --- /dev/null +++ b/block/blk-timeout.c @@ -0,0 +1,235 @@ +/* + * Functions related to generic timeout handling of requests. + */ +#include +#include +#include +#include + +#include "blk.h" +#include "blk-mq.h" + +#ifdef CONFIG_FAIL_IO_TIMEOUT + +static DECLARE_FAULT_ATTR(fail_io_timeout); + +static int __init setup_fail_io_timeout(char *str) +{ + return setup_fault_attr(&fail_io_timeout, str); +} +__setup("fail_io_timeout=", setup_fail_io_timeout); + +int blk_should_fake_timeout(struct request_queue *q) +{ + if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) + return 0; + + return should_fail(&fail_io_timeout, 1); +} + +static int __init fail_io_timeout_debugfs(void) +{ + struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout", + NULL, &fail_io_timeout); + + return PTR_ERR_OR_ZERO(dir); +} + +late_initcall(fail_io_timeout_debugfs); + +ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); + + return sprintf(buf, "%d\n", set != 0); +} + +ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + int val; + + if (count) { + struct request_queue *q = disk->queue; + char *p = (char *) buf; + + val = simple_strtoul(p, &p, 10); + spin_lock_irq(q->queue_lock); + if (val) + queue_flag_set(QUEUE_FLAG_FAIL_IO, q); + else + queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); + spin_unlock_irq(q->queue_lock); + } + + return count; +} + +#endif /* CONFIG_FAIL_IO_TIMEOUT */ + +/* + * blk_delete_timer - Delete/cancel timer for a given function. 
+ * @req: request that we are canceling timer for + * + */ +void blk_delete_timer(struct request *req) +{ + list_del_init(&req->timeout_list); +} + +static void blk_rq_timed_out(struct request *req) +{ + struct request_queue *q = req->q; + enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER; + + if (q->rq_timed_out_fn) + ret = q->rq_timed_out_fn(req); + switch (ret) { + case BLK_EH_HANDLED: + /* Can we use req->errors here? */ + __blk_complete_request(req); + break; + case BLK_EH_RESET_TIMER: + blk_add_timer(req); + blk_clear_rq_complete(req); + break; + case BLK_EH_NOT_HANDLED: + /* + * LLD handles this for now but in the future + * we can send a request msg to abort the command + * and we can move more of the generic scsi eh code to + * the blk layer. + */ + break; + default: + printk(KERN_ERR "block: bad eh return: %d\n", ret); + break; + } +} + +static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, + unsigned int *next_set) +{ + if (time_after_eq(jiffies, rq->deadline)) { + list_del_init(&rq->timeout_list); + + /* + * Check if we raced with end io completion + */ + if (!blk_mark_rq_complete(rq)) + blk_rq_timed_out(rq); + } else if (!*next_set || time_after(*next_timeout, rq->deadline)) { + *next_timeout = rq->deadline; + *next_set = 1; + } +} + +void blk_rq_timed_out_timer(unsigned long data) +{ + struct request_queue *q = (struct request_queue *) data; + unsigned long flags, next = 0; + struct request *rq, *tmp; + int next_set = 0; + + spin_lock_irqsave(q->queue_lock, flags); + + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) + blk_rq_check_expired(rq, &next, &next_set); + + if (next_set) + mod_timer(&q->timeout, round_jiffies_up(next)); + + spin_unlock_irqrestore(q->queue_lock, flags); +} + +/** + * blk_abort_request -- Request request recovery for the specified command + * @req: pointer to the request of interest + * + * This function requests that the block layer start recovery for the + * request by deleting the timer and calling the q's timeout function. + * LLDDs who implement their own error recovery MAY ignore the timeout + * event if they generated blk_abort_req. Must hold queue lock. + */ +void blk_abort_request(struct request *req) +{ + if (blk_mark_rq_complete(req)) + return; + blk_delete_timer(req); + if (req->q->mq_ops) + blk_mq_rq_timed_out(req, false); + else + blk_rq_timed_out(req); +} +EXPORT_SYMBOL_GPL(blk_abort_request); + +unsigned long blk_rq_timeout(unsigned long timeout) +{ + unsigned long maxt; + + maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT); + if (time_after(timeout, maxt)) + timeout = maxt; + + return timeout; +} + +/** + * blk_add_timer - Start timeout timer for a single request + * @req: request that is about to start running. + * + * Notes: + * Each request has its own timer, and as it is added to the queue, we + * set up the timer. When the request completes, we cancel the timer. + */ +void blk_add_timer(struct request *req) +{ + struct request_queue *q = req->q; + unsigned long expiry; + + if (req->cmd_flags & REQ_NO_TIMEOUT) + return; + + /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ + if (!q->mq_ops && !q->rq_timed_out_fn) + return; + + BUG_ON(!list_empty(&req->timeout_list)); + + /* + * Some LLDs, like scsi, peek at the timeout to prevent a + * command from being retried forever. 
+ */ + if (!req->timeout) + req->timeout = q->rq_timeout; + + req->deadline = jiffies + req->timeout; + if (!q->mq_ops) + list_add_tail(&req->timeout_list, &req->q->timeout_list); + + /* + * If the timer isn't already pending or this timeout is earlier + * than an existing one, modify the timer. Round up to next nearest + * second. + */ + expiry = blk_rq_timeout(round_jiffies_up(req->deadline)); + + if (!timer_pending(&q->timeout) || + time_before(expiry, q->timeout.expires)) { + unsigned long diff = q->timeout.expires - expiry; + + /* + * Due to added timer slack to group timers, the timer + * will often be a little in front of what we asked for. + * So apply some tolerance here too, otherwise we keep + * modifying the timer because expires for value X + * will be X + something. + */ + if (!timer_pending(&q->timeout) || (diff >= HZ / 2)) + mod_timer(&q->timeout, expiry); + } + +} diff --git a/block/blk.h b/block/blk.h new file mode 100644 index 000000000..43b036185 --- /dev/null +++ b/block/blk.h @@ -0,0 +1,284 @@ +#ifndef BLK_INTERNAL_H +#define BLK_INTERNAL_H + +#include +#include +#include "blk-mq.h" + +/* Amount of time in which a process may batch requests */ +#define BLK_BATCH_TIME (HZ/50UL) + +/* Number of requests a "batching" process may submit */ +#define BLK_BATCH_REQ 32 + +/* Max future timer expiry for timeouts */ +#define BLK_MAX_TIMEOUT (5 * HZ) + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + spinlock_t mq_flush_lock; +}; + +extern struct kmem_cache *blk_requestq_cachep; +extern struct kmem_cache *request_cachep; +extern struct kobj_type blk_queue_ktype; +extern struct ida blk_queue_ida; + +static inline struct blk_flush_queue *blk_get_flush_queue( + struct request_queue *q, struct blk_mq_ctx *ctx) +{ + struct blk_mq_hw_ctx *hctx; + + if (!q->mq_ops) + return q->fq; + + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + return hctx->fq; +} + +static inline void __blk_get_queue(struct request_queue *q) +{ + kobject_get(&q->kobj); +} + +struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, + int node, int cmd_size); +void blk_free_flush_queue(struct blk_flush_queue *q); + +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask); +void blk_exit_rl(struct request_list *rl); +void init_request_from_bio(struct request *req, struct bio *bio); +void blk_rq_bio_prep(struct request_queue *q, struct request *rq, + struct bio *bio); +int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio); +void blk_queue_bypass_start(struct request_queue *q); +void blk_queue_bypass_end(struct request_queue *q); +void blk_dequeue_request(struct request *rq); +void __blk_queue_free_tags(struct request_queue *q); +bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes); + +void blk_rq_timed_out_timer(unsigned long data); +unsigned long blk_rq_timeout(unsigned long timeout); +void blk_add_timer(struct request *req); +void blk_delete_timer(struct request *); + + +bool bio_attempt_front_merge(struct request_queue *q, struct request *req, + struct bio *bio); +bool bio_attempt_back_merge(struct request_queue *q, struct request *req, + struct bio *bio); +bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int *request_count); + 
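The expiry handling in blk_add_timer() and blk_rq_timeout() above can be illustrated with a standalone userspace sketch: clamp the deadline to a maximum, round up to a whole second, and only re-arm the timer when the expiry moves by at least HZ/2. HZ and the sample timeouts below are assumptions for the example.

/* sketch of blk_add_timer() expiry clamping and re-arm tolerance -- illustration only */
#include <stdio.h>

#define HZ              1000UL
#define BLK_MAX_TIMEOUT (5 * HZ)

static unsigned long round_up_sec(unsigned long j)
{
	return ((j + HZ - 1) / HZ) * HZ;   /* rough stand-in for round_jiffies_up() */
}

int main(void)
{
	unsigned long now = 12345;
	unsigned long deadline = now + 30 * HZ;                /* 30s request timeout */
	unsigned long maxt = round_up_sec(now + BLK_MAX_TIMEOUT);
	unsigned long expiry = round_up_sec(deadline);
	unsigned long timer_expires = 20000;                   /* timer currently armed here */

	if (expiry > maxt)                                     /* blk_rq_timeout() clamp */
		expiry = maxt;

	/* re-arm only if the new expiry is earlier by a meaningful amount */
	if (expiry < timer_expires && timer_expires - expiry >= HZ / 2)
		timer_expires = expiry;

	printf("expiry=%lu, timer now fires at %lu\n", expiry, timer_expires);
	return 0;
}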
+void blk_account_io_start(struct request *req, bool new_io); +void blk_account_io_completion(struct request *req, unsigned int bytes); +void blk_account_io_done(struct request *req); + +/* + * Internal atomic flags for request handling + */ +enum rq_atomic_flags { + REQ_ATOM_COMPLETE = 0, + REQ_ATOM_STARTED, +}; + +/* + * EH timer and IO completion will both attempt to 'grab' the request, make + * sure that only one of them succeeds + */ +static inline int blk_mark_rq_complete(struct request *rq) +{ + return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); +} + +static inline void blk_clear_rq_complete(struct request *rq) +{ + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); +} + +/* + * Internal elevator interface + */ +#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED) + +void blk_insert_flush(struct request *rq); + +static inline struct request *__elv_next_request(struct request_queue *q) +{ + struct request *rq; + struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); + + while (1) { + if (!list_empty(&q->queue_head)) { + rq = list_entry_rq(q->queue_head.next); + return rq; + } + + /* + * Flush request is running and flush request isn't queueable + * in the drive, we can hold the queue till flush request is + * finished. Even we don't do this, driver can't dispatch next + * requests and will requeue them. And this can improve + * throughput too. For example, we have request flush1, write1, + * flush 2. flush1 is dispatched, then queue is hold, write1 + * isn't inserted to queue. After flush1 is finished, flush2 + * will be dispatched. Since disk cache is already clean, + * flush2 will be finished very soon, so looks like flush2 is + * folded to flush1. + * Since the queue is hold, a flag is set to indicate the queue + * should be restarted later. Please see flush_end_io() for + * details. 
+ */ + if (fq->flush_pending_idx != fq->flush_running_idx && + !queue_flush_queueable(q)) { + fq->flush_queue_delayed = 1; + return NULL; + } + if (unlikely(blk_queue_bypass(q)) || + !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) + return NULL; + } +} + +static inline void elv_activate_rq(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_activate_req_fn) + e->type->ops.elevator_activate_req_fn(q, rq); +} + +static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_deactivate_req_fn) + e->type->ops.elevator_deactivate_req_fn(q, rq); +} + +#ifdef CONFIG_FAIL_IO_TIMEOUT +int blk_should_fake_timeout(struct request_queue *); +ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); +ssize_t part_timeout_store(struct device *, struct device_attribute *, + const char *, size_t); +#else +static inline int blk_should_fake_timeout(struct request_queue *q) +{ + return 0; +} +#endif + +int ll_back_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio); +int ll_front_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio); +int attempt_back_merge(struct request_queue *q, struct request *rq); +int attempt_front_merge(struct request_queue *q, struct request *rq); +int blk_attempt_req_merge(struct request_queue *q, struct request *rq, + struct request *next); +void blk_recalc_rq_segments(struct request *rq); +void blk_rq_set_mixed_merge(struct request *rq); +bool blk_rq_merge_ok(struct request *rq, struct bio *bio); +int blk_try_merge(struct request *rq, struct bio *bio); + +void blk_queue_congestion_threshold(struct request_queue *q); + +void __blk_run_queue_uncond(struct request_queue *q); + +int blk_dev_init(void); + + +/* + * Return the threshold (number of used requests) at which the queue is + * considered to be congested. It include a little hysteresis to keep the + * context switch rate down. + */ +static inline int queue_congestion_on_threshold(struct request_queue *q) +{ + return q->nr_congestion_on; +} + +/* + * The threshold at which a queue is considered to be uncongested + */ +static inline int queue_congestion_off_threshold(struct request_queue *q) +{ + return q->nr_congestion_off; +} + +extern int blk_update_nr_requests(struct request_queue *, unsigned int); + +/* + * Contribute to IO statistics IFF: + * + * a) it's attached to a gendisk, and + * b) the queue had IO stats enabled when this request was started, and + * c) it's a file system request + */ +static inline int blk_do_io_stat(struct request *rq) +{ + return rq->rq_disk && + (rq->cmd_flags & REQ_IO_STAT) && + (rq->cmd_type == REQ_TYPE_FS); +} + +/* + * Internal io_context interface + */ +void get_io_context(struct io_context *ioc); +struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q); +struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q, + gfp_t gfp_mask); +void ioc_clear_queue(struct request_queue *q); + +int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node); + +/** + * create_io_context - try to create task->io_context + * @gfp_mask: allocation mask + * @node: allocation node + * + * If %current->io_context is %NULL, allocate a new io_context and install + * it. Returns the current %current->io_context which may be %NULL if + * allocation failed. 
+ * + * Note that this function can't be called with IRQ disabled because + * task_lock which protects %current->io_context is IRQ-unsafe. + */ +static inline struct io_context *create_io_context(gfp_t gfp_mask, int node) +{ + WARN_ON_ONCE(irqs_disabled()); + if (unlikely(!current->io_context)) + create_task_io_context(current, gfp_mask, node); + return current->io_context; +} + +/* + * Internal throttling interface + */ +#ifdef CONFIG_BLK_DEV_THROTTLING +extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio); +extern void blk_throtl_drain(struct request_queue *q); +extern int blk_throtl_init(struct request_queue *q); +extern void blk_throtl_exit(struct request_queue *q); +#else /* CONFIG_BLK_DEV_THROTTLING */ +static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) +{ + return false; +} +static inline void blk_throtl_drain(struct request_queue *q) { } +static inline int blk_throtl_init(struct request_queue *q) { return 0; } +static inline void blk_throtl_exit(struct request_queue *q) { } +#endif /* CONFIG_BLK_DEV_THROTTLING */ + +#endif /* BLK_INTERNAL_H */ diff --git a/block/bounce.c b/block/bounce.c new file mode 100644 index 000000000..ed9dd8067 --- /dev/null +++ b/block/bounce.c @@ -0,0 +1,290 @@ +/* bounce buffer handling for block devices + * + * - Split from highmem.c + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define POOL_SIZE 64 +#define ISA_POOL_SIZE 16 + +static mempool_t *page_pool, *isa_page_pool; + +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL) +static __init int init_emergency_pool(void) +{ +#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG) + if (max_pfn <= max_low_pfn) + return 0; +#endif + + page_pool = mempool_create_page_pool(POOL_SIZE, 0); + BUG_ON(!page_pool); + pr_info("pool size: %d pages\n", POOL_SIZE); + + return 0; +} + +__initcall(init_emergency_pool); +#endif + +#ifdef CONFIG_HIGHMEM +/* + * highmem version, map in to vec + */ +static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) +{ + unsigned long flags; + unsigned char *vto; + + local_irq_save(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); + local_irq_restore(flags); +} + +#else /* CONFIG_HIGHMEM */ + +#define bounce_copy_vec(to, vfrom) \ + memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) + +#endif /* CONFIG_HIGHMEM */ + +/* + * allocate pages in the DMA region for the ISA pool + */ +static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) +{ + return mempool_alloc_pages(gfp_mask | GFP_DMA, data); +} + +/* + * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA + * as the max address, so check if the pool has already been created. + */ +int init_emergency_isa_pool(void) +{ + if (isa_page_pool) + return 0; + + isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, + mempool_free_pages, (void *) 0); + BUG_ON(!isa_page_pool); + + pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE); + return 0; +} + +/* + * Simple bounce buffer support for highmem pages. Depending on the + * queue gfp mask set, *to may or may not be a highmem page. 
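The bounce pool exists so that data aimed at a device that cannot address the original page is staged through a reachable page. A minimal userspace sketch of the copy-out/copy-back idea follows; the buffers and sizes are invented for illustration and this is not kernel code.

/* sketch of the bounce idea in __blk_queue_bounce(): writes are copied into a
 * reachable buffer before I/O, reads are copied back after completion. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
	char *orig   = malloc(PAGE_SIZE);   /* stands in for a highmem page */
	char *bounce = malloc(PAGE_SIZE);   /* stands in for a bounce-pool page */

	if (!orig || !bounce)
		return 1;

	/* WRITE: stage the payload where the device can reach it */
	memset(orig, 'A', PAGE_SIZE);
	memcpy(bounce, orig, PAGE_SIZE);

	/* READ completion: copy what the device produced back to the caller's page */
	memset(bounce, 'B', PAGE_SIZE);     /* pretend the device filled the bounce page */
	memcpy(orig, bounce, PAGE_SIZE);

	printf("orig[0] after read completion: %c\n", orig[0]);
	free(orig);
	free(bounce);
	return 0;
}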
kmap it + * always, it will do the Right Thing + */ +static void copy_to_high_bio_irq(struct bio *to, struct bio *from) +{ + unsigned char *vfrom; + struct bio_vec tovec, *fromvec = from->bi_io_vec; + struct bvec_iter iter; + + bio_for_each_segment(tovec, to, iter) { + if (tovec.bv_page != fromvec->bv_page) { + /* + * fromvec->bv_offset and fromvec->bv_len might have + * been modified by the block layer, so use the original + * copy, bounce_copy_vec already uses tovec->bv_len + */ + vfrom = page_address(fromvec->bv_page) + + tovec.bv_offset; + + bounce_copy_vec(&tovec, vfrom); + flush_dcache_page(tovec.bv_page); + } + + fromvec++; + } +} + +static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) +{ + struct bio *bio_orig = bio->bi_private; + struct bio_vec *bvec, *org_vec; + int i; + + if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) + set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); + + /* + * free up bounce indirect pages used + */ + bio_for_each_segment_all(bvec, bio, i) { + org_vec = bio_orig->bi_io_vec + i; + if (bvec->bv_page == org_vec->bv_page) + continue; + + dec_zone_page_state(bvec->bv_page, NR_BOUNCE); + mempool_free(bvec->bv_page, pool); + } + + bio_endio(bio_orig, err); + bio_put(bio); +} + +static void bounce_end_io_write(struct bio *bio, int err) +{ + bounce_end_io(bio, page_pool, err); +} + +static void bounce_end_io_write_isa(struct bio *bio, int err) +{ + + bounce_end_io(bio, isa_page_pool, err); +} + +static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) +{ + struct bio *bio_orig = bio->bi_private; + + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + copy_to_high_bio_irq(bio_orig, bio); + + bounce_end_io(bio, pool, err); +} + +static void bounce_end_io_read(struct bio *bio, int err) +{ + __bounce_end_io_read(bio, page_pool, err); +} + +static void bounce_end_io_read_isa(struct bio *bio, int err) +{ + __bounce_end_io_read(bio, isa_page_pool, err); +} + +#ifdef CONFIG_NEED_BOUNCE_POOL +static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) +{ + if (bio_data_dir(bio) != WRITE) + return 0; + + if (!bdi_cap_stable_pages_required(&q->backing_dev_info)) + return 0; + + return test_bit(BIO_SNAP_STABLE, &bio->bi_flags); +} +#else +static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) +{ + return 0; +} +#endif /* CONFIG_NEED_BOUNCE_POOL */ + +static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, + mempool_t *pool, int force) +{ + struct bio *bio; + int rw = bio_data_dir(*bio_orig); + struct bio_vec *to, from; + struct bvec_iter iter; + unsigned i; + + if (force) + goto bounce; + bio_for_each_segment(from, *bio_orig, iter) + if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) + goto bounce; + + return; +bounce: + bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set); + + bio_for_each_segment_all(to, bio, i) { + struct page *page = to->bv_page; + + if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) + continue; + + to->bv_page = mempool_alloc(pool, q->bounce_gfp); + inc_zone_page_state(to->bv_page, NR_BOUNCE); + + if (rw == WRITE) { + char *vto, *vfrom; + + flush_dcache_page(page); + + vto = page_address(to->bv_page) + to->bv_offset; + vfrom = kmap_atomic(page) + to->bv_offset; + memcpy(vto, vfrom, to->bv_len); + kunmap_atomic(vfrom); + } + } + + trace_block_bio_bounce(q, *bio_orig); + + bio->bi_flags |= (1 << BIO_BOUNCED); + + if (pool == page_pool) { + bio->bi_end_io = bounce_end_io_write; + if (rw == READ) + bio->bi_end_io = bounce_end_io_read; + } else { + bio->bi_end_io 
= bounce_end_io_write_isa; + if (rw == READ) + bio->bi_end_io = bounce_end_io_read_isa; + } + + bio->bi_private = *bio_orig; + *bio_orig = bio; +} + +void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) +{ + int must_bounce; + mempool_t *pool; + + /* + * Data-less bio, nothing to bounce + */ + if (!bio_has_data(*bio_orig)) + return; + + must_bounce = must_snapshot_stable_pages(q, *bio_orig); + + /* + * for non-isa bounce case, just check if the bounce pfn is equal + * to or bigger than the highest pfn in the system -- in that case, + * don't waste time iterating over bio segments + */ + if (!(q->bounce_gfp & GFP_DMA)) { + if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce) + return; + pool = page_pool; + } else { + BUG_ON(!isa_page_pool); + pool = isa_page_pool; + } + + /* + * slow path + */ + __blk_queue_bounce(q, bio_orig, pool, must_bounce); +} + +EXPORT_SYMBOL(blk_queue_bounce); diff --git a/block/bsg-lib.c b/block/bsg-lib.c new file mode 100644 index 000000000..650f427d9 --- /dev/null +++ b/block/bsg-lib.c @@ -0,0 +1,232 @@ +/* + * BSG helper library + * + * Copyright (C) 2008 James Smart, Emulex Corporation + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * Copyright (C) 2011 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include +#include + +/** + * bsg_destroy_job - routine to teardown/delete a bsg job + * @job: bsg_job that is to be torn down + */ +static void bsg_destroy_job(struct bsg_job *job) +{ + put_device(job->dev); /* release reference for the request */ + + kfree(job->request_payload.sg_list); + kfree(job->reply_payload.sg_list); + kfree(job); +} + +/** + * bsg_job_done - completion routine for bsg requests + * @job: bsg_job that is complete + * @result: job reply result + * @reply_payload_rcv_len: length of payload recvd + * + * The LLD should call this when the bsg job has completed. 
+ */ +void bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len) +{ + struct request *req = job->req; + struct request *rsp = req->next_rq; + int err; + + err = job->req->errors = result; + if (err < 0) + /* we're only returning the result field in the reply */ + job->req->sense_len = sizeof(u32); + else + job->req->sense_len = job->reply_len; + /* we assume all request payload was transferred, residual == 0 */ + req->resid_len = 0; + + if (rsp) { + WARN_ON(reply_payload_rcv_len > rsp->resid_len); + + /* set reply (bidi) residual */ + rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len); + } + blk_complete_request(req); +} +EXPORT_SYMBOL_GPL(bsg_job_done); + +/** + * bsg_softirq_done - softirq done routine for destroying the bsg requests + * @rq: BSG request that holds the job to be destroyed + */ +static void bsg_softirq_done(struct request *rq) +{ + struct bsg_job *job = rq->special; + + blk_end_request_all(rq, rq->errors); + bsg_destroy_job(job); +} + +static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) +{ + size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); + + BUG_ON(!req->nr_phys_segments); + + buf->sg_list = kzalloc(sz, GFP_KERNEL); + if (!buf->sg_list) + return -ENOMEM; + sg_init_table(buf->sg_list, req->nr_phys_segments); + buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); + buf->payload_len = blk_rq_bytes(req); + return 0; +} + +/** + * bsg_create_job - create the bsg_job structure for the bsg request + * @dev: device that is being sent the bsg request + * @req: BSG request that needs a job structure + */ +static int bsg_create_job(struct device *dev, struct request *req) +{ + struct request *rsp = req->next_rq; + struct request_queue *q = req->q; + struct bsg_job *job; + int ret; + + BUG_ON(req->special); + + job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); + if (!job) + return -ENOMEM; + + req->special = job; + job->req = req; + if (q->bsg_job_size) + job->dd_data = (void *)&job[1]; + job->request = req->cmd; + job->request_len = req->cmd_len; + job->reply = req->sense; + job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer + * allocated */ + if (req->bio) { + ret = bsg_map_buffer(&job->request_payload, req); + if (ret) + goto failjob_rls_job; + } + if (rsp && rsp->bio) { + ret = bsg_map_buffer(&job->reply_payload, rsp); + if (ret) + goto failjob_rls_rqst_payload; + } + job->dev = dev; + /* take a reference for the request */ + get_device(job->dev); + return 0; + +failjob_rls_rqst_payload: + kfree(job->request_payload.sg_list); +failjob_rls_job: + kfree(job); + return -ENOMEM; +} + +/** + * bsg_request_fn - generic handler for bsg requests + * @q: request queue to manage + * + * On error the create_bsg_job function should return a -Exyz error value + * that will be set to the req->errors. + * + * Drivers/subsys should pass this to the queue init function. 
+ */ +void bsg_request_fn(struct request_queue *q) +{ + struct device *dev = q->queuedata; + struct request *req; + struct bsg_job *job; + int ret; + + if (!get_device(dev)) + return; + + while (1) { + req = blk_fetch_request(q); + if (!req) + break; + spin_unlock_irq(q->queue_lock); + + ret = bsg_create_job(dev, req); + if (ret) { + req->errors = ret; + blk_end_request_all(req, ret); + spin_lock_irq(q->queue_lock); + continue; + } + + job = req->special; + ret = q->bsg_job_fn(job); + spin_lock_irq(q->queue_lock); + if (ret) + break; + } + + spin_unlock_irq(q->queue_lock); + put_device(dev); + spin_lock_irq(q->queue_lock); +} +EXPORT_SYMBOL_GPL(bsg_request_fn); + +/** + * bsg_setup_queue - Create and add the bsg hooks so we can receive requests + * @dev: device to attach bsg device to + * @q: request queue setup by caller + * @name: device to give bsg device + * @job_fn: bsg job handler + * @dd_job_size: size of LLD data needed for each job + * + * The caller should have setup the reuqest queue with bsg_request_fn + * as the request_fn. + */ +int bsg_setup_queue(struct device *dev, struct request_queue *q, + char *name, bsg_job_fn *job_fn, int dd_job_size) +{ + int ret; + + q->queuedata = dev; + q->bsg_job_size = dd_job_size; + q->bsg_job_fn = job_fn; + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); + blk_queue_softirq_done(q, bsg_softirq_done); + blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); + + ret = bsg_register_queue(q, dev, name, NULL); + if (ret) { + printk(KERN_ERR "%s: bsg interface failed to " + "initialize - register queue\n", dev->kobj.name); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(bsg_setup_queue); diff --git a/block/bsg.c b/block/bsg.c new file mode 100644 index 000000000..d214e929c --- /dev/null +++ b/block/bsg.c @@ -0,0 +1,1097 @@ +/* + * bsg.c - block layer implementation of the sg v4 interface + * + * Copyright (C) 2004 Jens Axboe SUSE Labs + * Copyright (C) 2004 Peter M. Jones + * + * This file is subject to the terms and conditions of the GNU General Public + * License version 2. See the file "COPYING" in the main directory of this + * archive for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" +#define BSG_VERSION "0.4" + +struct bsg_device { + struct request_queue *queue; + spinlock_t lock; + struct list_head busy_list; + struct list_head done_list; + struct hlist_node dev_list; + atomic_t ref_count; + int queued_cmds; + int done_cmds; + wait_queue_head_t wq_done; + wait_queue_head_t wq_free; + char name[20]; + int max_queue; + unsigned long flags; +}; + +enum { + BSG_F_BLOCK = 1, +}; + +#define BSG_DEFAULT_CMDS 64 +#define BSG_MAX_DEVS 32768 + +#undef BSG_DEBUG + +#ifdef BSG_DEBUG +#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args) +#else +#define dprintk(fmt, args...) 
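A hedged sketch of how a driver might pair bsg_request_fn() with bsg_setup_queue() as described above: my_job_fn, my_attach_bsg, my_dev and my_lock are hypothetical names, and the fragment only builds inside a kernel module with the usual block/bsg headers.

/* hypothetical LLD glue -- a sketch only, based on the comments above */
static int my_job_fn(struct bsg_job *job)
{
	/* ... hand job->request to the hardware, then complete it ... */
	bsg_job_done(job, 0, 0);
	return 0;
}

static int my_attach_bsg(struct device *my_dev, spinlock_t *my_lock)
{
	struct request_queue *q;

	/* the request queue must use bsg_request_fn as its request_fn */
	q = blk_init_queue(bsg_request_fn, my_lock);
	if (!q)
		return -ENOMEM;

	return bsg_setup_queue(my_dev, q, "my_bsg", my_job_fn, 0);
}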
+#endif + +static DEFINE_MUTEX(bsg_mutex); +static DEFINE_IDR(bsg_minor_idr); + +#define BSG_LIST_ARRAY_SIZE 8 +static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; + +static struct class *bsg_class; +static int bsg_major; + +static struct kmem_cache *bsg_cmd_cachep; + +/* + * our internal command type + */ +struct bsg_command { + struct bsg_device *bd; + struct list_head list; + struct request *rq; + struct bio *bio; + struct bio *bidi_bio; + int err; + struct sg_io_v4 hdr; + char sense[SCSI_SENSE_BUFFERSIZE]; +}; + +static void bsg_free_command(struct bsg_command *bc) +{ + struct bsg_device *bd = bc->bd; + unsigned long flags; + + kmem_cache_free(bsg_cmd_cachep, bc); + + spin_lock_irqsave(&bd->lock, flags); + bd->queued_cmds--; + spin_unlock_irqrestore(&bd->lock, flags); + + wake_up(&bd->wq_free); +} + +static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) +{ + struct bsg_command *bc = ERR_PTR(-EINVAL); + + spin_lock_irq(&bd->lock); + + if (bd->queued_cmds >= bd->max_queue) + goto out; + + bd->queued_cmds++; + spin_unlock_irq(&bd->lock); + + bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); + if (unlikely(!bc)) { + spin_lock_irq(&bd->lock); + bd->queued_cmds--; + bc = ERR_PTR(-ENOMEM); + goto out; + } + + bc->bd = bd; + INIT_LIST_HEAD(&bc->list); + dprintk("%s: returning free cmd %p\n", bd->name, bc); + return bc; +out: + spin_unlock_irq(&bd->lock); + return bc; +} + +static inline struct hlist_head *bsg_dev_idx_hash(int index) +{ + return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; +} + +static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_v4 *hdr, struct bsg_device *bd, + fmode_t has_write_perm) +{ + if (hdr->request_len > BLK_MAX_CDB) { + rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); + if (!rq->cmd) + return -ENOMEM; + } + + if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, + hdr->request_len)) + return -EFAULT; + + if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { + if (blk_verify_command(rq->cmd, has_write_perm)) + return -EPERM; + } else if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* + * fill in request structure + */ + rq->cmd_len = hdr->request_len; + + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = q->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + return 0; +} + +/* + * Check if sg_io_v4 from user is allowed and valid + */ +static int +bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) +{ + int ret = 0; + + if (hdr->guard != 'Q') + return -EINVAL; + + switch (hdr->protocol) { + case BSG_PROTOCOL_SCSI: + switch (hdr->subprotocol) { + case BSG_SUB_PROTOCOL_SCSI_CMD: + case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: + break; + default: + ret = -EINVAL; + } + break; + default: + ret = -EINVAL; + } + + *rw = hdr->dout_xfer_len ? WRITE : READ; + return ret; +} + +/* + * map sg_io_v4 to a request. + */ +static struct request * +bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, + u8 *sense) +{ + struct request_queue *q = bd->queue; + struct request *rq, *next_rq = NULL; + int ret, rw; + unsigned int dxfer_len; + void __user *dxferp = NULL; + struct bsg_class_device *bcd = &q->bsg_dev; + + /* if the LLD has been removed then the bsg_unregister_queue will + * eventually be called and the class_dev was freed, so we can no + * longer use this request_queue. Return no such address. 
+ */ + if (!bcd->class_dev) + return ERR_PTR(-ENXIO); + + dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, + hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, + hdr->din_xfer_len); + + ret = bsg_validate_sgv4_hdr(q, hdr, &rw); + if (ret) + return ERR_PTR(ret); + + /* + * map scatter-gather elements separately and string them to request + */ + rq = blk_get_request(q, rw, GFP_KERNEL); + if (IS_ERR(rq)) + return rq; + blk_rq_set_block_pc(rq); + + ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); + if (ret) + goto out; + + if (rw == WRITE && hdr->din_xfer_len) { + if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { + ret = -EOPNOTSUPP; + goto out; + } + + next_rq = blk_get_request(q, READ, GFP_KERNEL); + if (IS_ERR(next_rq)) { + ret = PTR_ERR(next_rq); + next_rq = NULL; + goto out; + } + rq->next_rq = next_rq; + next_rq->cmd_type = rq->cmd_type; + + dxferp = (void __user *)(unsigned long)hdr->din_xferp; + ret = blk_rq_map_user(q, next_rq, NULL, dxferp, + hdr->din_xfer_len, GFP_KERNEL); + if (ret) + goto out; + } + + if (hdr->dout_xfer_len) { + dxfer_len = hdr->dout_xfer_len; + dxferp = (void __user *)(unsigned long)hdr->dout_xferp; + } else if (hdr->din_xfer_len) { + dxfer_len = hdr->din_xfer_len; + dxferp = (void __user *)(unsigned long)hdr->din_xferp; + } else + dxfer_len = 0; + + if (dxfer_len) { + ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, + GFP_KERNEL); + if (ret) + goto out; + } + + rq->sense = sense; + rq->sense_len = 0; + + return rq; +out: + if (rq->cmd != rq->__cmd) + kfree(rq->cmd); + blk_put_request(rq); + if (next_rq) { + blk_rq_unmap_user(next_rq->bio); + blk_put_request(next_rq); + } + return ERR_PTR(ret); +} + +/* + * async completion call-back from the block layer, when scsi/ide/whatever + * calls end_that_request_last() on a request + */ +static void bsg_rq_end_io(struct request *rq, int uptodate) +{ + struct bsg_command *bc = rq->end_io_data; + struct bsg_device *bd = bc->bd; + unsigned long flags; + + dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", + bd->name, rq, bc, bc->bio, uptodate); + + bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); + + spin_lock_irqsave(&bd->lock, flags); + list_move_tail(&bc->list, &bd->done_list); + bd->done_cmds++; + spin_unlock_irqrestore(&bd->lock, flags); + + wake_up(&bd->wq_done); +} + +/* + * do final setup of a 'bc' and submit the matching 'rq' to the block + * layer for io + */ +static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, + struct bsg_command *bc, struct request *rq) +{ + int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL)); + + /* + * add bc command to busy queue and submit rq for io + */ + bc->rq = rq; + bc->bio = rq->bio; + if (rq->next_rq) + bc->bidi_bio = rq->next_rq->bio; + bc->hdr.duration = jiffies; + spin_lock_irq(&bd->lock); + list_add_tail(&bc->list, &bd->busy_list); + spin_unlock_irq(&bd->lock); + + dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); + + rq->end_io_data = bc; + blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); +} + +static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) +{ + struct bsg_command *bc = NULL; + + spin_lock_irq(&bd->lock); + if (bd->done_cmds) { + bc = list_first_entry(&bd->done_list, struct bsg_command, list); + list_del(&bc->list); + bd->done_cmds--; + } + spin_unlock_irq(&bd->lock); + + return bc; +} + +/* + * Get a finished command from the done list + */ +static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) +{ + struct bsg_command *bc; + int 
ret; + + do { + bc = bsg_next_done_cmd(bd); + if (bc) + break; + + if (!test_bit(BSG_F_BLOCK, &bd->flags)) { + bc = ERR_PTR(-EAGAIN); + break; + } + + ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); + if (ret) { + bc = ERR_PTR(-ERESTARTSYS); + break; + } + } while (1); + + dprintk("%s: returning done %p\n", bd->name, bc); + + return bc; +} + +static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, + struct bio *bio, struct bio *bidi_bio) +{ + int ret = 0; + + dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); + /* + * fill in all the output members + */ + hdr->device_status = rq->errors & 0xff; + hdr->transport_status = host_byte(rq->errors); + hdr->driver_status = driver_byte(rq->errors); + hdr->info = 0; + if (hdr->device_status || hdr->transport_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->response_len = 0; + + if (rq->sense_len && hdr->response) { + int len = min_t(unsigned int, hdr->max_response_len, + rq->sense_len); + + ret = copy_to_user((void __user *)(unsigned long)hdr->response, + rq->sense, len); + if (!ret) + hdr->response_len = len; + else + ret = -EFAULT; + } + + if (rq->next_rq) { + hdr->dout_resid = rq->resid_len; + hdr->din_resid = rq->next_rq->resid_len; + blk_rq_unmap_user(bidi_bio); + blk_put_request(rq->next_rq); + } else if (rq_data_dir(rq) == READ) + hdr->din_resid = rq->resid_len; + else + hdr->dout_resid = rq->resid_len; + + /* + * If the request generated a negative error number, return it + * (providing we aren't already returning an error); if it's + * just a protocol response (i.e. non negative), that gets + * processed above. + */ + if (!ret && rq->errors < 0) + ret = rq->errors; + + blk_rq_unmap_user(bio); + if (rq->cmd != rq->__cmd) + kfree(rq->cmd); + blk_put_request(rq); + + return ret; +} + +static bool bsg_complete(struct bsg_device *bd) +{ + bool ret = false; + bool spin; + + do { + spin_lock_irq(&bd->lock); + + BUG_ON(bd->done_cmds > bd->queued_cmds); + + /* + * All commands consumed. + */ + if (bd->done_cmds == bd->queued_cmds) + ret = true; + + spin = !test_bit(BSG_F_BLOCK, &bd->flags); + + spin_unlock_irq(&bd->lock); + } while (!ret && spin); + + return ret; +} + +static int bsg_complete_all_commands(struct bsg_device *bd) +{ + struct bsg_command *bc; + int ret, tret; + + dprintk("%s: entered\n", bd->name); + + /* + * wait for all commands to complete + */ + io_wait_event(bd->wq_done, bsg_complete(bd)); + + /* + * discard done commands + */ + ret = 0; + do { + spin_lock_irq(&bd->lock); + if (!bd->queued_cmds) { + spin_unlock_irq(&bd->lock); + break; + } + spin_unlock_irq(&bd->lock); + + bc = bsg_get_done_cmd(bd); + if (IS_ERR(bc)) + break; + + tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, + bc->bidi_bio); + if (!ret) + ret = tret; + + bsg_free_command(bc); + } while (1); + + return ret; +} + +static int +__bsg_read(char __user *buf, size_t count, struct bsg_device *bd, + const struct iovec *iov, ssize_t *bytes_read) +{ + struct bsg_command *bc; + int nr_commands, ret; + + if (count % sizeof(struct sg_io_v4)) + return -EINVAL; + + ret = 0; + nr_commands = count / sizeof(struct sg_io_v4); + while (nr_commands) { + bc = bsg_get_done_cmd(bd); + if (IS_ERR(bc)) { + ret = PTR_ERR(bc); + break; + } + + /* + * this is the only case where we need to copy data back + * after completing the request. 
so do that here, + * bsg_complete_work() cannot do that for us + */ + ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, + bc->bidi_bio); + + if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr))) + ret = -EFAULT; + + bsg_free_command(bc); + + if (ret) + break; + + buf += sizeof(struct sg_io_v4); + *bytes_read += sizeof(struct sg_io_v4); + nr_commands--; + } + + return ret; +} + +static inline void bsg_set_block(struct bsg_device *bd, struct file *file) +{ + if (file->f_flags & O_NONBLOCK) + clear_bit(BSG_F_BLOCK, &bd->flags); + else + set_bit(BSG_F_BLOCK, &bd->flags); +} + +/* + * Check if the error is a "real" error that we should return. + */ +static inline int err_block_err(int ret) +{ + if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN) + return 1; + + return 0; +} + +static ssize_t +bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct bsg_device *bd = file->private_data; + int ret; + ssize_t bytes_read; + + dprintk("%s: read %Zd bytes\n", bd->name, count); + + bsg_set_block(bd, file); + + bytes_read = 0; + ret = __bsg_read(buf, count, bd, NULL, &bytes_read); + *ppos = bytes_read; + + if (!bytes_read || err_block_err(ret)) + bytes_read = ret; + + return bytes_read; +} + +static int __bsg_write(struct bsg_device *bd, const char __user *buf, + size_t count, ssize_t *bytes_written, + fmode_t has_write_perm) +{ + struct bsg_command *bc; + struct request *rq; + int ret, nr_commands; + + if (count % sizeof(struct sg_io_v4)) + return -EINVAL; + + nr_commands = count / sizeof(struct sg_io_v4); + rq = NULL; + bc = NULL; + ret = 0; + while (nr_commands) { + struct request_queue *q = bd->queue; + + bc = bsg_alloc_command(bd); + if (IS_ERR(bc)) { + ret = PTR_ERR(bc); + bc = NULL; + break; + } + + if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { + ret = -EFAULT; + break; + } + + /* + * get a request, fill in the blanks, and add to request queue + */ + rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + rq = NULL; + break; + } + + bsg_add_command(bd, q, bc, rq); + bc = NULL; + rq = NULL; + nr_commands--; + buf += sizeof(struct sg_io_v4); + *bytes_written += sizeof(struct sg_io_v4); + } + + if (bc) + bsg_free_command(bc); + + return ret; +} + +static ssize_t +bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + struct bsg_device *bd = file->private_data; + ssize_t bytes_written; + int ret; + + dprintk("%s: write %Zd bytes\n", bd->name, count); + + bsg_set_block(bd, file); + + bytes_written = 0; + ret = __bsg_write(bd, buf, count, &bytes_written, + file->f_mode & FMODE_WRITE); + + *ppos = bytes_written; + + /* + * return bytes written on non-fatal errors + */ + if (!bytes_written || err_block_err(ret)) + bytes_written = ret; + + dprintk("%s: returning %Zd\n", bd->name, bytes_written); + return bytes_written; +} + +static struct bsg_device *bsg_alloc_device(void) +{ + struct bsg_device *bd; + + bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); + if (unlikely(!bd)) + return NULL; + + spin_lock_init(&bd->lock); + + bd->max_queue = BSG_DEFAULT_CMDS; + + INIT_LIST_HEAD(&bd->busy_list); + INIT_LIST_HEAD(&bd->done_list); + INIT_HLIST_NODE(&bd->dev_list); + + init_waitqueue_head(&bd->wq_free); + init_waitqueue_head(&bd->wq_done); + return bd; +} + +static void bsg_kref_release_function(struct kref *kref) +{ + struct bsg_class_device *bcd = + container_of(kref, struct bsg_class_device, ref); + struct device *parent = bcd->parent; + + if (bcd->release) + 
bcd->release(bcd->parent); + + put_device(parent); +} + +static int bsg_put_device(struct bsg_device *bd) +{ + int ret = 0, do_free; + struct request_queue *q = bd->queue; + + mutex_lock(&bsg_mutex); + + do_free = atomic_dec_and_test(&bd->ref_count); + if (!do_free) { + mutex_unlock(&bsg_mutex); + goto out; + } + + hlist_del(&bd->dev_list); + mutex_unlock(&bsg_mutex); + + dprintk("%s: tearing down\n", bd->name); + + /* + * close can always block + */ + set_bit(BSG_F_BLOCK, &bd->flags); + + /* + * correct error detection baddies here again. it's the responsibility + * of the app to properly reap commands before close() if it wants + * fool-proof error detection + */ + ret = bsg_complete_all_commands(bd); + + kfree(bd); +out: + kref_put(&q->bsg_dev.ref, bsg_kref_release_function); + if (do_free) + blk_put_queue(q); + return ret; +} + +static struct bsg_device *bsg_add_device(struct inode *inode, + struct request_queue *rq, + struct file *file) +{ + struct bsg_device *bd; +#ifdef BSG_DEBUG + unsigned char buf[32]; +#endif + if (!blk_get_queue(rq)) + return ERR_PTR(-ENXIO); + + bd = bsg_alloc_device(); + if (!bd) { + blk_put_queue(rq); + return ERR_PTR(-ENOMEM); + } + + bd->queue = rq; + + bsg_set_block(bd, file); + + atomic_set(&bd->ref_count, 1); + mutex_lock(&bsg_mutex); + hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); + + strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); + dprintk("bound to <%s>, max queue %d\n", + format_dev_t(buf, inode->i_rdev), bd->max_queue); + + mutex_unlock(&bsg_mutex); + return bd; +} + +static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) +{ + struct bsg_device *bd; + + mutex_lock(&bsg_mutex); + + hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { + if (bd->queue == q) { + atomic_inc(&bd->ref_count); + goto found; + } + } + bd = NULL; +found: + mutex_unlock(&bsg_mutex); + return bd; +} + +static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) +{ + struct bsg_device *bd; + struct bsg_class_device *bcd; + + /* + * find the class device + */ + mutex_lock(&bsg_mutex); + bcd = idr_find(&bsg_minor_idr, iminor(inode)); + if (bcd) + kref_get(&bcd->ref); + mutex_unlock(&bsg_mutex); + + if (!bcd) + return ERR_PTR(-ENODEV); + + bd = __bsg_get_device(iminor(inode), bcd->queue); + if (bd) + return bd; + + bd = bsg_add_device(inode, bcd->queue, file); + if (IS_ERR(bd)) + kref_put(&bcd->ref, bsg_kref_release_function); + + return bd; +} + +static int bsg_open(struct inode *inode, struct file *file) +{ + struct bsg_device *bd; + + bd = bsg_get_device(inode, file); + + if (IS_ERR(bd)) + return PTR_ERR(bd); + + file->private_data = bd; + return 0; +} + +static int bsg_release(struct inode *inode, struct file *file) +{ + struct bsg_device *bd = file->private_data; + + file->private_data = NULL; + return bsg_put_device(bd); +} + +static unsigned int bsg_poll(struct file *file, poll_table *wait) +{ + struct bsg_device *bd = file->private_data; + unsigned int mask = 0; + + poll_wait(file, &bd->wq_done, wait); + poll_wait(file, &bd->wq_free, wait); + + spin_lock_irq(&bd->lock); + if (!list_empty(&bd->done_list)) + mask |= POLLIN | POLLRDNORM; + if (bd->queued_cmds < bd->max_queue) + mask |= POLLOUT; + spin_unlock_irq(&bd->lock); + + return mask; +} + +static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct bsg_device *bd = file->private_data; + int __user *uarg = (int __user *) arg; + int ret; + + switch (cmd) { + /* + * our own ioctls + */ + case 
SG_GET_COMMAND_Q: + return put_user(bd->max_queue, uarg); + case SG_SET_COMMAND_Q: { + int queue; + + if (get_user(queue, uarg)) + return -EFAULT; + if (queue < 1) + return -EINVAL; + + spin_lock_irq(&bd->lock); + bd->max_queue = queue; + spin_unlock_irq(&bd->lock); + return 0; + } + + /* + * SCSI/sg ioctls + */ + case SG_GET_VERSION_NUM: + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SG_SET_TIMEOUT: + case SG_GET_TIMEOUT: + case SG_GET_RESERVED_SIZE: + case SG_SET_RESERVED_SIZE: + case SG_EMULATED_HOST: + case SCSI_IOCTL_SEND_COMMAND: { + void __user *uarg = (void __user *) arg; + return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); + } + case SG_IO: { + struct request *rq; + struct bio *bio, *bidi_bio = NULL; + struct sg_io_v4 hdr; + int at_head; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + + if (copy_from_user(&hdr, uarg, sizeof(hdr))) + return -EFAULT; + + rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + bio = rq->bio; + if (rq->next_rq) + bidi_bio = rq->next_rq->bio; + + at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL)); + blk_execute_rq(bd->queue, NULL, rq, at_head); + ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); + + if (copy_to_user(uarg, &hdr, sizeof(hdr))) + return -EFAULT; + + return ret; + } + /* + * block device ioctls + */ + default: +#if 0 + return ioctl_by_bdev(bd->bdev, cmd, arg); +#else + return -ENOTTY; +#endif + } +} + +static const struct file_operations bsg_fops = { + .read = bsg_read, + .write = bsg_write, + .poll = bsg_poll, + .open = bsg_open, + .release = bsg_release, + .unlocked_ioctl = bsg_ioctl, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void bsg_unregister_queue(struct request_queue *q) +{ + struct bsg_class_device *bcd = &q->bsg_dev; + + if (!bcd->class_dev) + return; + + mutex_lock(&bsg_mutex); + idr_remove(&bsg_minor_idr, bcd->minor); + if (q->kobj.sd) + sysfs_remove_link(&q->kobj, "bsg"); + device_unregister(bcd->class_dev); + bcd->class_dev = NULL; + kref_put(&bcd->ref, bsg_kref_release_function); + mutex_unlock(&bsg_mutex); +} +EXPORT_SYMBOL_GPL(bsg_unregister_queue); + +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, void (*release)(struct device *)) +{ + struct bsg_class_device *bcd; + dev_t dev; + int ret; + struct device *class_dev = NULL; + const char *devname; + + if (name) + devname = name; + else + devname = dev_name(parent); + + /* + * we need a proper transport to send commands, not a stacked device + */ + if (!queue_is_rq_based(q)) + return 0; + + bcd = &q->bsg_dev; + memset(bcd, 0, sizeof(*bcd)); + + mutex_lock(&bsg_mutex); + + ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL); + if (ret < 0) { + if (ret == -ENOSPC) { + printk(KERN_ERR "bsg: too many bsg devices\n"); + ret = -EINVAL; + } + goto unlock; + } + + bcd->minor = ret; + bcd->queue = q; + bcd->parent = get_device(parent); + bcd->release = release; + kref_init(&bcd->ref); + dev = MKDEV(bsg_major, bcd->minor); + class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname); + if (IS_ERR(class_dev)) { + ret = PTR_ERR(class_dev); + goto put_dev; + } + bcd->class_dev = class_dev; + + if (q->kobj.sd) { + ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"); + if (ret) + goto unregister_class_dev; + } + + mutex_unlock(&bsg_mutex); + return 0; + +unregister_class_dev: + device_unregister(class_dev); +put_dev: + put_device(parent); + idr_remove(&bsg_minor_idr, bcd->minor); +unlock: + 
mutex_unlock(&bsg_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(bsg_register_queue); + +static struct cdev bsg_cdev; + +static char *bsg_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); +} + +static int __init bsg_init(void) +{ + int ret, i; + dev_t devid; + + bsg_cmd_cachep = kmem_cache_create("bsg_cmd", + sizeof(struct bsg_command), 0, 0, NULL); + if (!bsg_cmd_cachep) { + printk(KERN_ERR "bsg: failed creating slab cache\n"); + return -ENOMEM; + } + + for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++) + INIT_HLIST_HEAD(&bsg_device_list[i]); + + bsg_class = class_create(THIS_MODULE, "bsg"); + if (IS_ERR(bsg_class)) { + ret = PTR_ERR(bsg_class); + goto destroy_kmemcache; + } + bsg_class->devnode = bsg_devnode; + + ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); + if (ret) + goto destroy_bsg_class; + + bsg_major = MAJOR(devid); + + cdev_init(&bsg_cdev, &bsg_fops); + ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS); + if (ret) + goto unregister_chrdev; + + printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION + " loaded (major %d)\n", bsg_major); + return 0; +unregister_chrdev: + unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS); +destroy_bsg_class: + class_destroy(bsg_class); +destroy_kmemcache: + kmem_cache_destroy(bsg_cmd_cachep); + return ret; +} + +MODULE_AUTHOR("Jens Axboe"); +MODULE_DESCRIPTION(BSG_DESCRIPTION); +MODULE_LICENSE("GPL"); + +device_initcall(bsg_init); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c new file mode 100644 index 000000000..5da8e6e9a --- /dev/null +++ b/block/cfq-iosched.c @@ -0,0 +1,4671 @@ +/* + * CFQ, or complete fairness queueing, disk scheduler. + * + * Based on ideas from a previously unfinished io + * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. 
+ * + * Copyright (C) 2003 Jens Axboe + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "blk.h" +#include "blk-cgroup.h" + +/* + * tunables + */ +/* max queue in one round of service */ +static const int cfq_quantum = 8; +static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; +/* maximum backwards seek, in KiB */ +static const int cfq_back_max = 16 * 1024; +/* penalty of a backwards seek */ +static const int cfq_back_penalty = 2; +static const int cfq_slice_sync = HZ / 10; +static int cfq_slice_async = HZ / 25; +static const int cfq_slice_async_rq = 2; +static int cfq_slice_idle = HZ / 125; +static int cfq_group_idle = HZ / 125; +static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ +static const int cfq_hist_divisor = 4; + +/* + * offset from end of service tree + */ +#define CFQ_IDLE_DELAY (HZ / 5) + +/* + * below this threshold, we consider thinktime immediate + */ +#define CFQ_MIN_TT (2) + +#define CFQ_SLICE_SCALE (5) +#define CFQ_HW_QUEUE_MIN (5) +#define CFQ_SERVICE_SHIFT 12 + +#define CFQQ_SEEK_THR (sector_t)(8 * 100) +#define CFQQ_CLOSE_THR (sector_t)(8 * 1024) +#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) +#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) + +#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) +#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0]) +#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1]) + +static struct kmem_cache *cfq_pool; + +#define CFQ_PRIO_LISTS IOPRIO_BE_NR +#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) +#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) + +#define sample_valid(samples) ((samples) > 80) +#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node) + +struct cfq_ttime { + unsigned long last_end_request; + + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; +}; + +/* + * Most of our rbtree usage is for sorting with min extraction, so + * if we cache the leftmost node we don't have to walk down the tree + * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should + * move this into the elevator for the rq sorting as well. + */ +struct cfq_rb_root { + struct rb_root rb; + struct rb_node *left; + unsigned count; + u64 min_vdisktime; + struct cfq_ttime ttime; +}; +#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \ + .ttime = {.last_end_request = jiffies,},} + +/* + * Per process-grouping structure + */ +struct cfq_queue { + /* reference count */ + int ref; + /* various state flags, see below */ + unsigned int flags; + /* parent cfq_data */ + struct cfq_data *cfqd; + /* service_tree member */ + struct rb_node rb_node; + /* service_tree key */ + unsigned long rb_key; + /* prio tree member */ + struct rb_node p_node; + /* prio tree root we belong to, if any */ + struct rb_root *p_root; + /* sorted list of pending requests */ + struct rb_root sort_list; + /* if fifo isn't expired, next request to serve */ + struct request *next_rq; + /* requests queued in sort_list */ + int queued[2]; + /* currently allocated requests */ + int allocated[2]; + /* fifo list of requests in sort_list */ + struct list_head fifo; + + /* time when queue got scheduled in to dispatch first request. */ + unsigned long dispatch_start; + unsigned int allocated_slice; + unsigned int slice_dispatch; + /* time when first request from queue completed and slice started. 
*/ + unsigned long slice_start; + unsigned long slice_end; + long slice_resid; + + /* pending priority requests */ + int prio_pending; + /* number of requests that are on the dispatch list or inside driver */ + int dispatched; + + /* io prio of this group */ + unsigned short ioprio, org_ioprio; + unsigned short ioprio_class; + + pid_t pid; + + u32 seek_history; + sector_t last_request_pos; + + struct cfq_rb_root *service_tree; + struct cfq_queue *new_cfqq; + struct cfq_group *cfqg; + /* Number of sectors dispatched from queue in single dispatch round */ + unsigned long nr_sectors; +}; + +/* + * First index in the service_trees. + * IDLE is handled separately, so it has negative index + */ +enum wl_class_t { + BE_WORKLOAD = 0, + RT_WORKLOAD = 1, + IDLE_WORKLOAD = 2, + CFQ_PRIO_NR, +}; + +/* + * Second index in the service_trees. + */ +enum wl_type_t { + ASYNC_WORKLOAD = 0, + SYNC_NOIDLE_WORKLOAD = 1, + SYNC_WORKLOAD = 2 +}; + +struct cfqg_stats { +#ifdef CONFIG_CFQ_GROUP_IOSCHED + /* total bytes transferred */ + struct blkg_rwstat service_bytes; + /* total IOs serviced, post merge */ + struct blkg_rwstat serviced; + /* number of ios merged */ + struct blkg_rwstat merged; + /* total time spent on device in ns, may not be accurate w/ queueing */ + struct blkg_rwstat service_time; + /* total time spent waiting in scheduler queue in ns */ + struct blkg_rwstat wait_time; + /* number of IOs queued up */ + struct blkg_rwstat queued; + /* total sectors transferred */ + struct blkg_stat sectors; + /* total disk time and nr sectors dispatched by this group */ + struct blkg_stat time; +#ifdef CONFIG_DEBUG_BLK_CGROUP + /* time not charged to this cgroup */ + struct blkg_stat unaccounted_time; + /* sum of number of ios queued across all samples */ + struct blkg_stat avg_queue_size_sum; + /* count of samples taken for average */ + struct blkg_stat avg_queue_size_samples; + /* how many times this group has been removed from service tree */ + struct blkg_stat dequeue; + /* total time spent waiting for it to be assigned a timeslice. */ + struct blkg_stat group_wait_time; + /* time spent idling for this blkcg_gq */ + struct blkg_stat idle_time; + /* total time with empty current active q with other requests queued */ + struct blkg_stat empty_time; + /* fields after this shouldn't be cleared on stat reset */ + uint64_t start_group_wait_time; + uint64_t start_idle_time; + uint64_t start_empty_time; + uint16_t flags; +#endif /* CONFIG_DEBUG_BLK_CGROUP */ +#endif /* CONFIG_CFQ_GROUP_IOSCHED */ +}; + +/* This is per cgroup per device grouping structure */ +struct cfq_group { + /* must be the first member */ + struct blkg_policy_data pd; + + /* group service_tree member */ + struct rb_node rb_node; + + /* group service_tree key */ + u64 vdisktime; + + /* + * The number of active cfqgs and sum of their weights under this + * cfqg. This covers this cfqg's leaf_weight and all children's + * weights, but does not cover weights of further descendants. + * + * If a cfqg is on the service tree, it's active. An active cfqg + * also activates its parent and contributes to the children_weight + * of the parent. + */ + int nr_active; + unsigned int children_weight; + + /* + * vfraction is the fraction of vdisktime that the tasks in this + * cfqg are entitled to. This is determined by compounding the + * ratios walking up from this cfqg to the root. + * + * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all + * vfractions on a service tree is approximately 1. 
The sum may + * deviate a bit due to rounding errors and fluctuations caused by + * cfqgs entering and leaving the service tree. + */ + unsigned int vfraction; + + /* + * There are two weights - (internal) weight is the weight of this + * cfqg against the sibling cfqgs. leaf_weight is the wight of + * this cfqg against the child cfqgs. For the root cfqg, both + * weights are kept in sync for backward compatibility. + */ + unsigned int weight; + unsigned int new_weight; + unsigned int dev_weight; + + unsigned int leaf_weight; + unsigned int new_leaf_weight; + unsigned int dev_leaf_weight; + + /* number of cfqq currently on this group */ + int nr_cfqq; + + /* + * Per group busy queues average. Useful for workload slice calc. We + * create the array for each prio class but at run time it is used + * only for RT and BE class and slot for IDLE class remains unused. + * This is primarily done to avoid confusion and a gcc warning. + */ + unsigned int busy_queues_avg[CFQ_PRIO_NR]; + /* + * rr lists of queues with requests. We maintain service trees for + * RT and BE classes. These trees are subdivided in subclasses + * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE + * class there is no subclassification and all the cfq queues go on + * a single tree service_tree_idle. + * Counts are embedded in the cfq_rb_root + */ + struct cfq_rb_root service_trees[2][3]; + struct cfq_rb_root service_tree_idle; + + unsigned long saved_wl_slice; + enum wl_type_t saved_wl_type; + enum wl_class_t saved_wl_class; + + /* number of requests that are on the dispatch list or inside driver */ + int dispatched; + struct cfq_ttime ttime; + struct cfqg_stats stats; /* stats for this cfqg */ + struct cfqg_stats dead_stats; /* stats pushed from dead children */ +}; + +struct cfq_io_cq { + struct io_cq icq; /* must be the first member */ + struct cfq_queue *cfqq[2]; + struct cfq_ttime ttime; + int ioprio; /* the current ioprio */ +#ifdef CONFIG_CFQ_GROUP_IOSCHED + uint64_t blkcg_serial_nr; /* the current blkcg serial */ +#endif +}; + +/* + * Per block device queue structure + */ +struct cfq_data { + struct request_queue *queue; + /* Root service tree for cfq_groups */ + struct cfq_rb_root grp_service_tree; + struct cfq_group *root_group; + + /* + * The priority currently being served + */ + enum wl_class_t serving_wl_class; + enum wl_type_t serving_wl_type; + unsigned long workload_expires; + struct cfq_group *serving_group; + + /* + * Each priority tree is sorted by next_request position. These + * trees are used when determining if two or more queues are + * interleaving requests (see cfq_close_cooperator). 
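+ *
+ * Editorial illustration, not part of the upstream comment: two
+ * processes streaming the same file through separate queues tend to
+ * issue requests that alternate within a small sector range. Looking
+ * up the prio tree for a queue whose next request lies within
+ * CFQQ_CLOSE_THR sectors of the current head position lets
+ * cfq_close_cooperator() treat such queues as cooperators instead of
+ * idling between their requests.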
+ */ + struct rb_root prio_trees[CFQ_PRIO_LISTS]; + + unsigned int busy_queues; + unsigned int busy_sync_queues; + + int rq_in_driver; + int rq_in_flight[2]; + + /* + * queue-depth detection + */ + int rq_queued; + int hw_tag; + /* + * hw_tag can be + * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) + * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) + * 0 => no NCQ + */ + int hw_tag_est_depth; + unsigned int hw_tag_samples; + + /* + * idle window management + */ + struct timer_list idle_slice_timer; + struct work_struct unplug_work; + + struct cfq_queue *active_queue; + struct cfq_io_cq *active_cic; + + /* + * async queue for each priority case + */ + struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; + struct cfq_queue *async_idle_cfqq; + + sector_t last_position; + + /* + * tunables, see top of file + */ + unsigned int cfq_quantum; + unsigned int cfq_fifo_expire[2]; + unsigned int cfq_back_penalty; + unsigned int cfq_back_max; + unsigned int cfq_slice[2]; + unsigned int cfq_slice_async_rq; + unsigned int cfq_slice_idle; + unsigned int cfq_group_idle; + unsigned int cfq_latency; + unsigned int cfq_target_latency; + + /* + * Fallback dummy cfqq for extreme OOM conditions + */ + struct cfq_queue oom_cfqq; + + unsigned long last_delayed_sync; +}; + +static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); + +static struct cfq_rb_root *st_for(struct cfq_group *cfqg, + enum wl_class_t class, + enum wl_type_t type) +{ + if (!cfqg) + return NULL; + + if (class == IDLE_WORKLOAD) + return &cfqg->service_tree_idle; + + return &cfqg->service_trees[class][type]; +} + +enum cfqq_state_flags { + CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ + CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ + CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ + CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ + CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ + CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ + CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ + CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ + CFQ_CFQQ_FLAG_sync, /* synchronous queue */ + CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ + CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ + CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ + CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ +}; + +#define CFQ_CFQQ_FNS(name) \ +static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ +{ \ + (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ +} \ +static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ +{ \ + (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ +} \ +static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ +{ \ + return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ +} + +CFQ_CFQQ_FNS(on_rr); +CFQ_CFQQ_FNS(wait_request); +CFQ_CFQQ_FNS(must_dispatch); +CFQ_CFQQ_FNS(must_alloc_slice); +CFQ_CFQQ_FNS(fifo_expire); +CFQ_CFQQ_FNS(idle_window); +CFQ_CFQQ_FNS(prio_changed); +CFQ_CFQQ_FNS(slice_new); +CFQ_CFQQ_FNS(sync); +CFQ_CFQQ_FNS(coop); +CFQ_CFQQ_FNS(split_coop); +CFQ_CFQQ_FNS(deep); +CFQ_CFQQ_FNS(wait_busy); +#undef CFQ_CFQQ_FNS + +static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd) +{ + return pd ? 
container_of(pd, struct cfq_group, pd) : NULL; +} + +static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) +{ + return pd_to_blkg(&cfqg->pd); +} + +#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) + +/* cfqg stats flags */ +enum cfqg_stats_flags { + CFQG_stats_waiting = 0, + CFQG_stats_idling, + CFQG_stats_empty, +}; + +#define CFQG_FLAG_FNS(name) \ +static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \ +{ \ + stats->flags |= (1 << CFQG_stats_##name); \ +} \ +static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \ +{ \ + stats->flags &= ~(1 << CFQG_stats_##name); \ +} \ +static inline int cfqg_stats_##name(struct cfqg_stats *stats) \ +{ \ + return (stats->flags & (1 << CFQG_stats_##name)) != 0; \ +} \ + +CFQG_FLAG_FNS(waiting) +CFQG_FLAG_FNS(idling) +CFQG_FLAG_FNS(empty) +#undef CFQG_FLAG_FNS + +/* This should be called with the queue_lock held. */ +static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats) +{ + unsigned long long now; + + if (!cfqg_stats_waiting(stats)) + return; + + now = sched_clock(); + if (time_after64(now, stats->start_group_wait_time)) + blkg_stat_add(&stats->group_wait_time, + now - stats->start_group_wait_time); + cfqg_stats_clear_waiting(stats); +} + +/* This should be called with the queue_lock held. */ +static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, + struct cfq_group *curr_cfqg) +{ + struct cfqg_stats *stats = &cfqg->stats; + + if (cfqg_stats_waiting(stats)) + return; + if (cfqg == curr_cfqg) + return; + stats->start_group_wait_time = sched_clock(); + cfqg_stats_mark_waiting(stats); +} + +/* This should be called with the queue_lock held. */ +static void cfqg_stats_end_empty_time(struct cfqg_stats *stats) +{ + unsigned long long now; + + if (!cfqg_stats_empty(stats)) + return; + + now = sched_clock(); + if (time_after64(now, stats->start_empty_time)) + blkg_stat_add(&stats->empty_time, + now - stats->start_empty_time); + cfqg_stats_clear_empty(stats); +} + +static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) +{ + blkg_stat_add(&cfqg->stats.dequeue, 1); +} + +static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) +{ + struct cfqg_stats *stats = &cfqg->stats; + + if (blkg_rwstat_total(&stats->queued)) + return; + + /* + * group is already marked empty. This can happen if cfqq got new + * request in parent group and moved to this group while being added + * to service tree. Just ignore the event and move on. 
+ */ + if (cfqg_stats_empty(stats)) + return; + + stats->start_empty_time = sched_clock(); + cfqg_stats_mark_empty(stats); +} + +static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) +{ + struct cfqg_stats *stats = &cfqg->stats; + + if (cfqg_stats_idling(stats)) { + unsigned long long now = sched_clock(); + + if (time_after64(now, stats->start_idle_time)) + blkg_stat_add(&stats->idle_time, + now - stats->start_idle_time); + cfqg_stats_clear_idling(stats); + } +} + +static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) +{ + struct cfqg_stats *stats = &cfqg->stats; + + BUG_ON(cfqg_stats_idling(stats)); + + stats->start_idle_time = sched_clock(); + cfqg_stats_mark_idling(stats); +} + +static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) +{ + struct cfqg_stats *stats = &cfqg->stats; + + blkg_stat_add(&stats->avg_queue_size_sum, + blkg_rwstat_total(&stats->queued)); + blkg_stat_add(&stats->avg_queue_size_samples, 1); + cfqg_stats_update_group_wait_time(stats); +} + +#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */ + +static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { } +static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { } +static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { } +static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { } +static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { } +static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { } +static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { } + +#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */ + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + +static struct blkcg_policy blkcg_policy_cfq; + +static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg) +{ + return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq)); +} + +static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) +{ + struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent; + + return pblkg ? blkg_to_cfqg(pblkg) : NULL; +} + +static inline void cfqg_get(struct cfq_group *cfqg) +{ + return blkg_get(cfqg_to_blkg(cfqg)); +} + +static inline void cfqg_put(struct cfq_group *cfqg) +{ + return blkg_put(cfqg_to_blkg(cfqg)); +} + +#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \ + char __pbuf[128]; \ + \ + blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \ + blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \ + cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ + cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\ + __pbuf, ##args); \ +} while (0) + +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) 
do { \ + char __pbuf[128]; \ + \ + blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \ + blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \ +} while (0) + +static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, + struct cfq_group *curr_cfqg, int rw) +{ + blkg_rwstat_add(&cfqg->stats.queued, rw, 1); + cfqg_stats_end_empty_time(&cfqg->stats); + cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg); +} + +static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, + unsigned long time, unsigned long unaccounted_time) +{ + blkg_stat_add(&cfqg->stats.time, time); +#ifdef CONFIG_DEBUG_BLK_CGROUP + blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time); +#endif +} + +static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) +{ + blkg_rwstat_add(&cfqg->stats.queued, rw, -1); +} + +static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) +{ + blkg_rwstat_add(&cfqg->stats.merged, rw, 1); +} + +static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg, + uint64_t bytes, int rw) +{ + blkg_stat_add(&cfqg->stats.sectors, bytes >> 9); + blkg_rwstat_add(&cfqg->stats.serviced, rw, 1); + blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes); +} + +static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, + uint64_t start_time, uint64_t io_start_time, int rw) +{ + struct cfqg_stats *stats = &cfqg->stats; + unsigned long long now = sched_clock(); + + if (time_after64(now, io_start_time)) + blkg_rwstat_add(&stats->service_time, rw, now - io_start_time); + if (time_after64(io_start_time, start_time)) + blkg_rwstat_add(&stats->wait_time, rw, + io_start_time - start_time); +} + +/* @stats = 0 */ +static void cfqg_stats_reset(struct cfqg_stats *stats) +{ + /* queued stats shouldn't be cleared */ + blkg_rwstat_reset(&stats->service_bytes); + blkg_rwstat_reset(&stats->serviced); + blkg_rwstat_reset(&stats->merged); + blkg_rwstat_reset(&stats->service_time); + blkg_rwstat_reset(&stats->wait_time); + blkg_stat_reset(&stats->time); +#ifdef CONFIG_DEBUG_BLK_CGROUP + blkg_stat_reset(&stats->unaccounted_time); + blkg_stat_reset(&stats->avg_queue_size_sum); + blkg_stat_reset(&stats->avg_queue_size_samples); + blkg_stat_reset(&stats->dequeue); + blkg_stat_reset(&stats->group_wait_time); + blkg_stat_reset(&stats->idle_time); + blkg_stat_reset(&stats->empty_time); +#endif +} + +/* @to += @from */ +static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from) +{ + /* queued stats shouldn't be cleared */ + blkg_rwstat_merge(&to->service_bytes, &from->service_bytes); + blkg_rwstat_merge(&to->serviced, &from->serviced); + blkg_rwstat_merge(&to->merged, &from->merged); + blkg_rwstat_merge(&to->service_time, &from->service_time); + blkg_rwstat_merge(&to->wait_time, &from->wait_time); + blkg_stat_merge(&from->time, &from->time); +#ifdef CONFIG_DEBUG_BLK_CGROUP + blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time); + blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum); + blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples); + blkg_stat_merge(&to->dequeue, &from->dequeue); + blkg_stat_merge(&to->group_wait_time, &from->group_wait_time); + blkg_stat_merge(&to->idle_time, &from->idle_time); + blkg_stat_merge(&to->empty_time, &from->empty_time); +#endif +} + +/* + * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors' + * recursive stats can still account for the amount used by this cfqg after + * it's gone. 
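+ *
+ * Editorial note, not part of the upstream comment: this pairs with
+ * cfqg_stat_pd_recursive_sum() and cfqg_rwstat_pd_recursive_sum()
+ * further below, which add the live and dead stats together, so e.g.
+ * I/O time accumulated by a child cgroup keeps appearing in its
+ * parent's recursive totals after the child has been removed.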
+ */ +static void cfqg_stats_xfer_dead(struct cfq_group *cfqg) +{ + struct cfq_group *parent = cfqg_parent(cfqg); + + lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock); + + if (unlikely(!parent)) + return; + + cfqg_stats_merge(&parent->dead_stats, &cfqg->stats); + cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats); + cfqg_stats_reset(&cfqg->stats); + cfqg_stats_reset(&cfqg->dead_stats); +} + +#else /* CONFIG_CFQ_GROUP_IOSCHED */ + +static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; } +static inline void cfqg_get(struct cfq_group *cfqg) { } +static inline void cfqg_put(struct cfq_group *cfqg) { } + +#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \ + cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ + cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\ + ##args) +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) + +static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, + struct cfq_group *curr_cfqg, int rw) { } +static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, + unsigned long time, unsigned long unaccounted_time) { } +static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { } +static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { } +static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg, + uint64_t bytes, int rw) { } +static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, + uint64_t start_time, uint64_t io_start_time, int rw) { } + +#endif /* CONFIG_CFQ_GROUP_IOSCHED */ + +#define cfq_log(cfqd, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) + +/* Traverses through cfq group service trees */ +#define for_each_cfqg_st(cfqg, i, j, st) \ + for (i = 0; i <= IDLE_WORKLOAD; i++) \ + for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\ + : &cfqg->service_tree_idle; \ + (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \ + (i == IDLE_WORKLOAD && j == 0); \ + j++, st = i < IDLE_WORKLOAD ? \ + &cfqg->service_trees[i][j]: NULL) \ + +static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, + struct cfq_ttime *ttime, bool group_idle) +{ + unsigned long slice; + if (!sample_valid(ttime->ttime_samples)) + return false; + if (group_idle) + slice = cfqd->cfq_group_idle; + else + slice = cfqd->cfq_slice_idle; + return ttime->ttime_mean > slice; +} + +static inline bool iops_mode(struct cfq_data *cfqd) +{ + /* + * If we are not idling on queues and it is a NCQ drive, parallel + * execution of requests is on and measuring time is not possible + * in most of the cases until and unless we drive shallower queue + * depths and that becomes a performance bottleneck. In such cases + * switch to start providing fairness in terms of number of IOs. 
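+ *
+ * Editorial note, not part of the upstream comment: this is the case
+ * where slice_idle has been tuned to 0 and the device is detected as
+ * queueing (hw_tag). cfq_group_served() further below then charges
+ * cfqq->slice_dispatch, the number of requests dispatched in the
+ * slice, to vdisktime instead of elapsed time.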
+ */ + if (!cfqd->cfq_slice_idle && cfqd->hw_tag) + return true; + else + return false; +} + +static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq) +{ + if (cfq_class_idle(cfqq)) + return IDLE_WORKLOAD; + if (cfq_class_rt(cfqq)) + return RT_WORKLOAD; + return BE_WORKLOAD; +} + + +static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) +{ + if (!cfq_cfqq_sync(cfqq)) + return ASYNC_WORKLOAD; + if (!cfq_cfqq_idle_window(cfqq)) + return SYNC_NOIDLE_WORKLOAD; + return SYNC_WORKLOAD; +} + +static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class, + struct cfq_data *cfqd, + struct cfq_group *cfqg) +{ + if (wl_class == IDLE_WORKLOAD) + return cfqg->service_tree_idle.count; + + return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count + + cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count + + cfqg->service_trees[wl_class][SYNC_WORKLOAD].count; +} + +static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, + struct cfq_group *cfqg) +{ + return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; +} + +static void cfq_dispatch_insert(struct request_queue *, struct request *); +static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync, + struct cfq_io_cq *cic, struct bio *bio, + gfp_t gfp_mask); + +static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq) +{ + /* cic->icq is the first member, %NULL will convert to %NULL */ + return container_of(icq, struct cfq_io_cq, icq); +} + +static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd, + struct io_context *ioc) +{ + if (ioc) + return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue)); + return NULL; +} + +static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync) +{ + return cic->cfqq[is_sync]; +} + +static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq, + bool is_sync) +{ + cic->cfqq[is_sync] = cfqq; +} + +static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic) +{ + return cic->icq.q->elevator->elevator_data; +} + +/* + * We regard a request as SYNC, if it's either a read or has the SYNC bit + * set (in which case it could also be direct WRITE). + */ +static inline bool cfq_bio_sync(struct bio *bio) +{ + return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); +} + +/* + * scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing + */ +static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) +{ + if (cfqd->busy_queues) { + cfq_log(cfqd, "schedule dispatch"); + kblockd_schedule_work(&cfqd->unplug_work); + } +} + +/* + * Scale schedule slice based on io priority. Use the sync time slice only + * if a queue is marked sync and has sync io queued. A sync queue with async + * io only, should not get full sync slice length. + */ +static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, + unsigned short prio) +{ + const int base_slice = cfqd->cfq_slice[sync]; + + WARN_ON(prio >= IOPRIO_BE_NR); + + return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); +} + +static inline int +cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); +} + +/** + * cfqg_scale_charge - scale disk time charge according to cfqg weight + * @charge: disk time being charged + * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT + * + * Scale @charge according to @vfraction, which is in range (0, 1]. 
The + * scaling is inversely proportional. + * + * scaled = charge / vfraction + * + * The result is also in fixed point w/ CFQ_SERVICE_SHIFT. + */ +static inline u64 cfqg_scale_charge(unsigned long charge, + unsigned int vfraction) +{ + u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */ + + /* charge / vfraction */ + c <<= CFQ_SERVICE_SHIFT; + do_div(c, vfraction); + return c; +} + +static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) +{ + s64 delta = (s64)(vdisktime - min_vdisktime); + if (delta > 0) + min_vdisktime = vdisktime; + + return min_vdisktime; +} + +static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime) +{ + s64 delta = (s64)(vdisktime - min_vdisktime); + if (delta < 0) + min_vdisktime = vdisktime; + + return min_vdisktime; +} + +static void update_min_vdisktime(struct cfq_rb_root *st) +{ + struct cfq_group *cfqg; + + if (st->left) { + cfqg = rb_entry_cfqg(st->left); + st->min_vdisktime = max_vdisktime(st->min_vdisktime, + cfqg->vdisktime); + } +} + +/* + * get averaged number of queues of RT/BE priority. + * average is updated, with a formula that gives more weight to higher numbers, + * to quickly follows sudden increases and decrease slowly + */ + +static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, + struct cfq_group *cfqg, bool rt) +{ + unsigned min_q, max_q; + unsigned mult = cfq_hist_divisor - 1; + unsigned round = cfq_hist_divisor / 2; + unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); + + min_q = min(cfqg->busy_queues_avg[rt], busy); + max_q = max(cfqg->busy_queues_avg[rt], busy); + cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) / + cfq_hist_divisor; + return cfqg->busy_queues_avg[rt]; +} + +static inline unsigned +cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; +} + +static inline unsigned +cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + unsigned slice = cfq_prio_to_slice(cfqd, cfqq); + if (cfqd->cfq_latency) { + /* + * interested queues (we consider only the ones with the same + * priority class in the cfq group) + */ + unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, + cfq_class_rt(cfqq)); + unsigned sync_slice = cfqd->cfq_slice[1]; + unsigned expect_latency = sync_slice * iq; + unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); + + if (expect_latency > group_slice) { + unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; + /* scale low_slice according to IO priority + * and sync vs async */ + unsigned low_slice = + min(slice, base_low_slice * slice / sync_slice); + /* the adapted slice value is scaled to fit all iqs + * into the target latency */ + slice = max(slice * group_slice / expect_latency, + low_slice); + } + } + return slice; +} + +static inline void +cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); + + cfqq->slice_start = jiffies; + cfqq->slice_end = jiffies + slice; + cfqq->allocated_slice = slice; + cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); +} + +/* + * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end + * isn't valid until the first request from the dispatch is activated + * and the slice time set. 
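+ *
+ * Editorial note, not part of the upstream comment: a queue that has
+ * just been made active still carries a zero or stale ->slice_end,
+ * so without the cfq_cfqq_slice_new() check time_before(jiffies,
+ * cfqq->slice_end) could be false and the fresh slice would be
+ * reported as already used.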
+ */ +static inline bool cfq_slice_used(struct cfq_queue *cfqq) +{ + if (cfq_cfqq_slice_new(cfqq)) + return false; + if (time_before(jiffies, cfqq->slice_end)) + return false; + + return true; +} + +/* + * Lifted from AS - choose which of rq1 and rq2 that is best served now. + * We choose the request that is closest to the head right now. Distance + * behind the head is penalized and only allowed to a certain extent. + */ +static struct request * +cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) +{ + sector_t s1, s2, d1 = 0, d2 = 0; + unsigned long back_max; +#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ + unsigned wrap = 0; /* bit mask: requests behind the disk head? */ + + if (rq1 == NULL || rq1 == rq2) + return rq2; + if (rq2 == NULL) + return rq1; + + if (rq_is_sync(rq1) != rq_is_sync(rq2)) + return rq_is_sync(rq1) ? rq1 : rq2; + + if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO) + return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2; + + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); + + /* + * by definition, 1KiB is 2 sectors + */ + back_max = cfqd->cfq_back_max * 2; + + /* + * Strict one way elevator _except_ in the case where we allow + * short backward seeks which are biased as twice the cost of a + * similar forward seek. + */ + if (s1 >= last) + d1 = s1 - last; + else if (s1 + back_max >= last) + d1 = (last - s1) * cfqd->cfq_back_penalty; + else + wrap |= CFQ_RQ1_WRAP; + + if (s2 >= last) + d2 = s2 - last; + else if (s2 + back_max >= last) + d2 = (last - s2) * cfqd->cfq_back_penalty; + else + wrap |= CFQ_RQ2_WRAP; + + /* Found required data */ + + /* + * By doing switch() on the bit mask "wrap" we avoid having to + * check two variables for all permutations: --> faster! + */ + switch (wrap) { + case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ + if (d1 < d2) + return rq1; + else if (d2 < d1) + return rq2; + else { + if (s1 >= s2) + return rq1; + else + return rq2; + } + + case CFQ_RQ2_WRAP: + return rq1; + case CFQ_RQ1_WRAP: + return rq2; + case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ + default: + /* + * Since both rqs are wrapped, + * start with the one that's further behind head + * (--> only *one* back seek required), + * since back seek takes more time than forward. 
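+ *
+ * Editorial worked example, not part of the upstream comment: with
+ * the head at sector 10000 and back_max exceeded by both requests,
+ * s1 = 2000 and s2 = 5000 both count as wrapped; rq1 is further
+ * behind, so serving it first costs one back seek to 2000 followed
+ * by a forward seek to 5000, while starting with rq2 would need a
+ * second back seek afterwards.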
+ */ + if (s1 <= s2) + return rq1; + else + return rq2; + } +} + +/* + * The below is leftmost cache rbtree addon + */ +static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) +{ + /* Service tree is empty */ + if (!root->count) + return NULL; + + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry(root->left, struct cfq_queue, rb_node); + + return NULL; +} + +static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root) +{ + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry_cfqg(root->left); + + return NULL; +} + +static void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} + +static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) +{ + if (root->left == n) + root->left = NULL; + rb_erase_init(n, &root->rb); + --root->count; +} + +/* + * would be nice to take fifo expire time into account as well + */ +static struct request * +cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *last) +{ + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); + struct request *next = NULL, *prev = NULL; + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + + if (rbprev) + prev = rb_entry_rq(rbprev); + + if (rbnext) + next = rb_entry_rq(rbnext); + else { + rbnext = rb_first(&cfqq->sort_list); + if (rbnext && rbnext != &last->rb_node) + next = rb_entry_rq(rbnext); + } + + return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); +} + +static unsigned long cfq_slice_offset(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + /* + * just an approximation, should be ok. + */ + return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - + cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); +} + +static inline s64 +cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + return cfqg->vdisktime - st->min_vdisktime; +} + +static void +__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + struct rb_node **node = &st->rb.rb_node; + struct rb_node *parent = NULL; + struct cfq_group *__cfqg; + s64 key = cfqg_key(st, cfqg); + int left = 1; + + while (*node != NULL) { + parent = *node; + __cfqg = rb_entry_cfqg(parent); + + if (key < cfqg_key(st, __cfqg)) + node = &parent->rb_left; + else { + node = &parent->rb_right; + left = 0; + } + } + + if (left) + st->left = &cfqg->rb_node; + + rb_link_node(&cfqg->rb_node, parent, node); + rb_insert_color(&cfqg->rb_node, &st->rb); +} + +/* + * This has to be called only on activation of cfqg + */ +static void +cfq_update_group_weight(struct cfq_group *cfqg) +{ + if (cfqg->new_weight) { + cfqg->weight = cfqg->new_weight; + cfqg->new_weight = 0; + } +} + +static void +cfq_update_group_leaf_weight(struct cfq_group *cfqg) +{ + BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); + + if (cfqg->new_leaf_weight) { + cfqg->leaf_weight = cfqg->new_leaf_weight; + cfqg->new_leaf_weight = 0; + } +} + +static void +cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */ + struct cfq_group *pos = cfqg; + struct cfq_group *parent; + bool propagate; + + /* add to the service tree */ + BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); + + /* + * Update leaf_weight. We cannot update weight at this point + * because cfqg might already have been activated and is + * contributing its current weight to the parent's child_weight. 
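+ *
+ * Editorial worked example of the compounding done just below, not
+ * part of the upstream comment: with CFQ_SERVICE_SHIFT = 12, a group
+ * G whose weight and leaf_weight are both 500 and which has no
+ * active children gets a first factor of 500/500; if activating G
+ * brings the root's children_weight to 2000 (a sibling already
+ * contributing 1500), the second factor is 500/2000, giving
+ * vfraction = 4096 / 4 = 1024. G is entitled to a quarter of the
+ * device, and cfqg_scale_charge() above will inflate its charges
+ * accordingly (by 4x).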
+ */ + cfq_update_group_leaf_weight(cfqg); + __cfq_group_service_tree_add(st, cfqg); + + /* + * Activate @cfqg and calculate the portion of vfraction @cfqg is + * entitled to. vfraction is calculated by walking the tree + * towards the root calculating the fraction it has at each level. + * The compounded ratio is how much vfraction @cfqg owns. + * + * Start with the proportion tasks in this cfqg has against active + * children cfqgs - its leaf_weight against children_weight. + */ + propagate = !pos->nr_active++; + pos->children_weight += pos->leaf_weight; + vfr = vfr * pos->leaf_weight / pos->children_weight; + + /* + * Compound ->weight walking up the tree. Both activation and + * vfraction calculation are done in the same loop. Propagation + * stops once an already activated node is met. vfraction + * calculation should always continue to the root. + */ + while ((parent = cfqg_parent(pos))) { + if (propagate) { + cfq_update_group_weight(pos); + propagate = !parent->nr_active++; + parent->children_weight += pos->weight; + } + vfr = vfr * pos->weight / parent->children_weight; + pos = parent; + } + + cfqg->vfraction = max_t(unsigned, vfr, 1); +} + +static void +cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + struct cfq_group *__cfqg; + struct rb_node *n; + + cfqg->nr_cfqq++; + if (!RB_EMPTY_NODE(&cfqg->rb_node)) + return; + + /* + * Currently put the group at the end. Later implement something + * so that groups get lesser vtime based on their weights, so that + * if group does not loose all if it was not continuously backlogged. + */ + n = rb_last(&st->rb); + if (n) { + __cfqg = rb_entry_cfqg(n); + cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; + } else + cfqg->vdisktime = st->min_vdisktime; + cfq_group_service_tree_add(st, cfqg); +} + +static void +cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + struct cfq_group *pos = cfqg; + bool propagate; + + /* + * Undo activation from cfq_group_service_tree_add(). Deactivate + * @cfqg and propagate deactivation upwards. + */ + propagate = !--pos->nr_active; + pos->children_weight -= pos->leaf_weight; + + while (propagate) { + struct cfq_group *parent = cfqg_parent(pos); + + /* @pos has 0 nr_active at this point */ + WARN_ON_ONCE(pos->children_weight); + pos->vfraction = 0; + + if (!parent) + break; + + propagate = !--parent->nr_active; + parent->children_weight -= pos->weight; + pos = parent; + } + + /* remove from the service tree */ + if (!RB_EMPTY_NODE(&cfqg->rb_node)) + cfq_rb_erase(&cfqg->rb_node, st); +} + +static void +cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + + BUG_ON(cfqg->nr_cfqq < 1); + cfqg->nr_cfqq--; + + /* If there are other cfq queues under this group, don't delete it */ + if (cfqg->nr_cfqq) + return; + + cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); + cfq_group_service_tree_del(st, cfqg); + cfqg->saved_wl_slice = 0; + cfqg_stats_update_dequeue(cfqg); +} + +static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq, + unsigned int *unaccounted_time) +{ + unsigned int slice_used; + + /* + * Queue got expired before even a single request completed or + * got expired immediately after first request completion. 
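+ *
+ * Editorial worked example, not part of the upstream comment: if the
+ * queue was scheduled in at dispatch_start = 1000 jiffies, never saw
+ * a completion (slice_start still 0) and is expired at jiffies =
+ * 1003, the group is charged max(1003 - 1000, 1) = 3 jiffies rather
+ * than nothing, so purely seeky queues cannot consume service for
+ * free.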
+ */ + if (!cfqq->slice_start || cfqq->slice_start == jiffies) { + /* + * Also charge the seek time incurred to the group, otherwise + * if there are mutiple queues in the group, each can dispatch + * a single request on seeky media and cause lots of seek time + * and group will never know it. + */ + slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start), + 1); + } else { + slice_used = jiffies - cfqq->slice_start; + if (slice_used > cfqq->allocated_slice) { + *unaccounted_time = slice_used - cfqq->allocated_slice; + slice_used = cfqq->allocated_slice; + } + if (time_after(cfqq->slice_start, cfqq->dispatch_start)) + *unaccounted_time += cfqq->slice_start - + cfqq->dispatch_start; + } + + return slice_used; +} + +static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, + struct cfq_queue *cfqq) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + unsigned int used_sl, charge, unaccounted_sl = 0; + int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) + - cfqg->service_tree_idle.count; + unsigned int vfr; + + BUG_ON(nr_sync < 0); + used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); + + if (iops_mode(cfqd)) + charge = cfqq->slice_dispatch; + else if (!cfq_cfqq_sync(cfqq) && !nr_sync) + charge = cfqq->allocated_slice; + + /* + * Can't update vdisktime while on service tree and cfqg->vfraction + * is valid only while on it. Cache vfr, leave the service tree, + * update vdisktime and go back on. The re-addition to the tree + * will also update the weights as necessary. + */ + vfr = cfqg->vfraction; + cfq_group_service_tree_del(st, cfqg); + cfqg->vdisktime += cfqg_scale_charge(charge, vfr); + cfq_group_service_tree_add(st, cfqg); + + /* This group is being expired. Save the context */ + if (time_after(cfqd->workload_expires, jiffies)) { + cfqg->saved_wl_slice = cfqd->workload_expires + - jiffies; + cfqg->saved_wl_type = cfqd->serving_wl_type; + cfqg->saved_wl_class = cfqd->serving_wl_class; + } else + cfqg->saved_wl_slice = 0; + + cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, + st->min_vdisktime); + cfq_log_cfqq(cfqq->cfqd, cfqq, + "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", + used_sl, cfqq->slice_dispatch, charge, + iops_mode(cfqd), cfqq->nr_sectors); + cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); + cfqg_stats_set_start_empty_time(cfqg); +} + +/** + * cfq_init_cfqg_base - initialize base part of a cfq_group + * @cfqg: cfq_group to initialize + * + * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED + * is enabled or not. 
+ */ +static void cfq_init_cfqg_base(struct cfq_group *cfqg) +{ + struct cfq_rb_root *st; + int i, j; + + for_each_cfqg_st(cfqg, i, j, st) + *st = CFQ_RB_ROOT; + RB_CLEAR_NODE(&cfqg->rb_node); + + cfqg->ttime.last_end_request = jiffies; +} + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static void cfqg_stats_init(struct cfqg_stats *stats) +{ + blkg_rwstat_init(&stats->service_bytes); + blkg_rwstat_init(&stats->serviced); + blkg_rwstat_init(&stats->merged); + blkg_rwstat_init(&stats->service_time); + blkg_rwstat_init(&stats->wait_time); + blkg_rwstat_init(&stats->queued); + + blkg_stat_init(&stats->sectors); + blkg_stat_init(&stats->time); + +#ifdef CONFIG_DEBUG_BLK_CGROUP + blkg_stat_init(&stats->unaccounted_time); + blkg_stat_init(&stats->avg_queue_size_sum); + blkg_stat_init(&stats->avg_queue_size_samples); + blkg_stat_init(&stats->dequeue); + blkg_stat_init(&stats->group_wait_time); + blkg_stat_init(&stats->idle_time); + blkg_stat_init(&stats->empty_time); +#endif +} + +static void cfq_pd_init(struct blkcg_gq *blkg) +{ + struct cfq_group *cfqg = blkg_to_cfqg(blkg); + + cfq_init_cfqg_base(cfqg); + cfqg->weight = blkg->blkcg->cfq_weight; + cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight; + cfqg_stats_init(&cfqg->stats); + cfqg_stats_init(&cfqg->dead_stats); +} + +static void cfq_pd_offline(struct blkcg_gq *blkg) +{ + /* + * @blkg is going offline and will be ignored by + * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so + * that they don't get lost. If IOs complete after this point, the + * stats for them will be lost. Oh well... + */ + cfqg_stats_xfer_dead(blkg_to_cfqg(blkg)); +} + +/* offset delta from cfqg->stats to cfqg->dead_stats */ +static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) - + offsetof(struct cfq_group, stats); + +/* to be used by recursive prfill, sums live and dead stats recursively */ +static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off) +{ + u64 sum = 0; + + sum += blkg_stat_recursive_sum(pd, off); + sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta); + return sum; +} + +/* to be used by recursive prfill, sums live and dead rwstats recursively */ +static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, + int off) +{ + struct blkg_rwstat a, b; + + a = blkg_rwstat_recursive_sum(pd, off); + b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta); + blkg_rwstat_merge(&a, &b); + return a; +} + +static void cfq_pd_reset_stats(struct blkcg_gq *blkg) +{ + struct cfq_group *cfqg = blkg_to_cfqg(blkg); + + cfqg_stats_reset(&cfqg->stats); + cfqg_stats_reset(&cfqg->dead_stats); +} + +/* + * Search for the cfq group current task belongs to. request_queue lock must + * be held. 
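+ *
+ * If blkg_lookup_create() fails this returns NULL, and callers such
+ * as cfq_find_alloc_queue() below fall back to the embedded oom_cfqq
+ * instead of dereferencing a missing group.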
+ */ +static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, + struct blkcg *blkcg) +{ + struct request_queue *q = cfqd->queue; + struct cfq_group *cfqg = NULL; + + /* avoid lookup for the common case where there's no blkcg */ + if (blkcg == &blkcg_root) { + cfqg = cfqd->root_group; + } else { + struct blkcg_gq *blkg; + + blkg = blkg_lookup_create(blkcg, q); + if (!IS_ERR(blkg)) + cfqg = blkg_to_cfqg(blkg); + } + + return cfqg; +} + +static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) +{ + /* Currently, all async queues are mapped to root group */ + if (!cfq_cfqq_sync(cfqq)) + cfqg = cfqq->cfqd->root_group; + + cfqq->cfqg = cfqg; + /* cfqq reference on cfqg */ + cfqg_get(cfqg); +} + +static u64 cfqg_prfill_weight_device(struct seq_file *sf, + struct blkg_policy_data *pd, int off) +{ + struct cfq_group *cfqg = pd_to_cfqg(pd); + + if (!cfqg->dev_weight) + return 0; + return __blkg_prfill_u64(sf, pd, cfqg->dev_weight); +} + +static int cfqg_print_weight_device(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_weight_device, &blkcg_policy_cfq, + 0, false); + return 0; +} + +static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf, + struct blkg_policy_data *pd, int off) +{ + struct cfq_group *cfqg = pd_to_cfqg(pd); + + if (!cfqg->dev_leaf_weight) + return 0; + return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight); +} + +static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, + 0, false); + return 0; +} + +static int cfq_print_weight(struct seq_file *sf, void *v) +{ + seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight); + return 0; +} + +static int cfq_print_leaf_weight(struct seq_file *sf, void *v) +{ + seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight); + return 0; +} + +static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off, + bool is_leaf_weight) +{ + struct blkcg *blkcg = css_to_blkcg(of_css(of)); + struct blkg_conf_ctx ctx; + struct cfq_group *cfqg; + int ret; + + ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx); + if (ret) + return ret; + + ret = -EINVAL; + cfqg = blkg_to_cfqg(ctx.blkg); + if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) { + if (!is_leaf_weight) { + cfqg->dev_weight = ctx.v; + cfqg->new_weight = ctx.v ?: blkcg->cfq_weight; + } else { + cfqg->dev_leaf_weight = ctx.v; + cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight; + } + ret = 0; + } + + blkg_conf_finish(&ctx); + return ret ?: nbytes; +} + +static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + return __cfqg_set_weight_device(of, buf, nbytes, off, false); +} + +static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + return __cfqg_set_weight_device(of, buf, nbytes, off, true); +} + +static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft, + u64 val, bool is_leaf_weight) +{ + struct blkcg *blkcg = css_to_blkcg(css); + struct blkcg_gq *blkg; + + if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX) + return -EINVAL; + + spin_lock_irq(&blkcg->lock); + + if (!is_leaf_weight) + blkcg->cfq_weight = val; + else + blkcg->cfq_leaf_weight = val; + + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { + struct cfq_group *cfqg = blkg_to_cfqg(blkg); + + if 
(!cfqg) + continue; + + if (!is_leaf_weight) { + if (!cfqg->dev_weight) + cfqg->new_weight = blkcg->cfq_weight; + } else { + if (!cfqg->dev_leaf_weight) + cfqg->new_leaf_weight = blkcg->cfq_leaf_weight; + } + } + + spin_unlock_irq(&blkcg->lock); + return 0; +} + +static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft, + u64 val) +{ + return __cfq_set_weight(css, cft, val, false); +} + +static int cfq_set_leaf_weight(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + return __cfq_set_weight(css, cft, val, true); +} + +static int cfqg_print_stat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, + &blkcg_policy_cfq, seq_cft(sf)->private, false); + return 0; +} + +static int cfqg_print_rwstat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, + &blkcg_policy_cfq, seq_cft(sf)->private, true); + return 0; +} + +static u64 cfqg_prfill_stat_recursive(struct seq_file *sf, + struct blkg_policy_data *pd, int off) +{ + u64 sum = cfqg_stat_pd_recursive_sum(pd, off); + + return __blkg_prfill_u64(sf, pd, sum); +} + +static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf, + struct blkg_policy_data *pd, int off) +{ + struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off); + + return __blkg_prfill_rwstat(sf, pd, &sum); +} + +static int cfqg_print_stat_recursive(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_stat_recursive, &blkcg_policy_cfq, + seq_cft(sf)->private, false); + return 0; +} + +static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq, + seq_cft(sf)->private, true); + return 0; +} + +#ifdef CONFIG_DEBUG_BLK_CGROUP +static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, + struct blkg_policy_data *pd, int off) +{ + struct cfq_group *cfqg = pd_to_cfqg(pd); + u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples); + u64 v = 0; + + if (samples) { + v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); + v = div64_u64(v, samples); + } + __blkg_prfill_u64(sf, pd, v); + return 0; +} + +/* print avg_queue_size */ +static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_avg_queue_size, &blkcg_policy_cfq, + 0, false); + return 0; +} +#endif /* CONFIG_DEBUG_BLK_CGROUP */ + +static struct cftype cfq_blkcg_files[] = { + /* on root, weight is mapped to leaf_weight */ + { + .name = "weight_device", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = cfqg_print_leaf_weight_device, + .write = cfqg_set_leaf_weight_device, + }, + { + .name = "weight", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = cfq_print_leaf_weight, + .write_u64 = cfq_set_leaf_weight, + }, + + /* no such mapping necessary for !roots */ + { + .name = "weight_device", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cfqg_print_weight_device, + .write = cfqg_set_weight_device, + }, + { + .name = "weight", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cfq_print_weight, + .write_u64 = cfq_set_weight, + }, + + { + .name = "leaf_weight_device", + .seq_show = cfqg_print_leaf_weight_device, + .write = cfqg_set_leaf_weight_device, + }, + { + .name = "leaf_weight", + .seq_show = cfq_print_leaf_weight, + .write_u64 = cfq_set_leaf_weight, + }, + + /* statistics, covers only the tasks in the cfqg */ + { + .name = "time", + .private = offsetof(struct 
cfq_group, stats.time), + .seq_show = cfqg_print_stat, + }, + { + .name = "sectors", + .private = offsetof(struct cfq_group, stats.sectors), + .seq_show = cfqg_print_stat, + }, + { + .name = "io_service_bytes", + .private = offsetof(struct cfq_group, stats.service_bytes), + .seq_show = cfqg_print_rwstat, + }, + { + .name = "io_serviced", + .private = offsetof(struct cfq_group, stats.serviced), + .seq_show = cfqg_print_rwstat, + }, + { + .name = "io_service_time", + .private = offsetof(struct cfq_group, stats.service_time), + .seq_show = cfqg_print_rwstat, + }, + { + .name = "io_wait_time", + .private = offsetof(struct cfq_group, stats.wait_time), + .seq_show = cfqg_print_rwstat, + }, + { + .name = "io_merged", + .private = offsetof(struct cfq_group, stats.merged), + .seq_show = cfqg_print_rwstat, + }, + { + .name = "io_queued", + .private = offsetof(struct cfq_group, stats.queued), + .seq_show = cfqg_print_rwstat, + }, + + /* the same statictics which cover the cfqg and its descendants */ + { + .name = "time_recursive", + .private = offsetof(struct cfq_group, stats.time), + .seq_show = cfqg_print_stat_recursive, + }, + { + .name = "sectors_recursive", + .private = offsetof(struct cfq_group, stats.sectors), + .seq_show = cfqg_print_stat_recursive, + }, + { + .name = "io_service_bytes_recursive", + .private = offsetof(struct cfq_group, stats.service_bytes), + .seq_show = cfqg_print_rwstat_recursive, + }, + { + .name = "io_serviced_recursive", + .private = offsetof(struct cfq_group, stats.serviced), + .seq_show = cfqg_print_rwstat_recursive, + }, + { + .name = "io_service_time_recursive", + .private = offsetof(struct cfq_group, stats.service_time), + .seq_show = cfqg_print_rwstat_recursive, + }, + { + .name = "io_wait_time_recursive", + .private = offsetof(struct cfq_group, stats.wait_time), + .seq_show = cfqg_print_rwstat_recursive, + }, + { + .name = "io_merged_recursive", + .private = offsetof(struct cfq_group, stats.merged), + .seq_show = cfqg_print_rwstat_recursive, + }, + { + .name = "io_queued_recursive", + .private = offsetof(struct cfq_group, stats.queued), + .seq_show = cfqg_print_rwstat_recursive, + }, +#ifdef CONFIG_DEBUG_BLK_CGROUP + { + .name = "avg_queue_size", + .seq_show = cfqg_print_avg_queue_size, + }, + { + .name = "group_wait_time", + .private = offsetof(struct cfq_group, stats.group_wait_time), + .seq_show = cfqg_print_stat, + }, + { + .name = "idle_time", + .private = offsetof(struct cfq_group, stats.idle_time), + .seq_show = cfqg_print_stat, + }, + { + .name = "empty_time", + .private = offsetof(struct cfq_group, stats.empty_time), + .seq_show = cfqg_print_stat, + }, + { + .name = "dequeue", + .private = offsetof(struct cfq_group, stats.dequeue), + .seq_show = cfqg_print_stat, + }, + { + .name = "unaccounted_time", + .private = offsetof(struct cfq_group, stats.unaccounted_time), + .seq_show = cfqg_print_stat, + }, +#endif /* CONFIG_DEBUG_BLK_CGROUP */ + { } /* terminate */ +}; +#else /* GROUP_IOSCHED */ +static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, + struct blkcg *blkcg) +{ + return cfqd->root_group; +} + +static inline void +cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { + cfqq->cfqg = cfqg; +} + +#endif /* GROUP_IOSCHED */ + +/* + * The cfqd->service_trees holds all pending cfq_queue's that have + * requests waiting to be processed. It is sorted in the order that + * we will service the queues. 
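+ *
+ * Loose example of the rb_key arithmetic below (values are
+ * illustrative only): a normal, non-front add at jiffies=1000 with a
+ * slice offset of 40 and slice_resid=10 gets key 1000 + 40 - 10 =
+ * 1030; a queue that overran its slice carries a negative resid and
+ * therefore sorts further right, i.e. it is serviced later.
+ * Idle-class queues are keyed CFQ_IDLE_DELAY past the last entry,
+ * and add_front places the queue just ahead of the current leftmost
+ * entry.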
+ */ +static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, + bool add_front) +{ + struct rb_node **p, *parent; + struct cfq_queue *__cfqq; + unsigned long rb_key; + struct cfq_rb_root *st; + int left; + int new_cfqq = 1; + + st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); + if (cfq_class_idle(cfqq)) { + rb_key = CFQ_IDLE_DELAY; + parent = rb_last(&st->rb); + if (parent && parent != &cfqq->rb_node) { + __cfqq = rb_entry(parent, struct cfq_queue, rb_node); + rb_key += __cfqq->rb_key; + } else + rb_key += jiffies; + } else if (!add_front) { + /* + * Get our rb key offset. Subtract any residual slice + * value carried from last service. A negative resid + * count indicates slice overrun, and this should position + * the next service time further away in the tree. + */ + rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; + rb_key -= cfqq->slice_resid; + cfqq->slice_resid = 0; + } else { + rb_key = -HZ; + __cfqq = cfq_rb_first(st); + rb_key += __cfqq ? __cfqq->rb_key : jiffies; + } + + if (!RB_EMPTY_NODE(&cfqq->rb_node)) { + new_cfqq = 0; + /* + * same position, nothing more to do + */ + if (rb_key == cfqq->rb_key && cfqq->service_tree == st) + return; + + cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); + cfqq->service_tree = NULL; + } + + left = 1; + parent = NULL; + cfqq->service_tree = st; + p = &st->rb.rb_node; + while (*p) { + parent = *p; + __cfqq = rb_entry(parent, struct cfq_queue, rb_node); + + /* + * sort by key, that represents service time. + */ + if (time_before(rb_key, __cfqq->rb_key)) + p = &parent->rb_left; + else { + p = &parent->rb_right; + left = 0; + } + } + + if (left) + st->left = &cfqq->rb_node; + + cfqq->rb_key = rb_key; + rb_link_node(&cfqq->rb_node, parent, p); + rb_insert_color(&cfqq->rb_node, &st->rb); + st->count++; + if (add_front || !new_cfqq) + return; + cfq_group_notify_queue_add(cfqd, cfqq->cfqg); +} + +static struct cfq_queue * +cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, + sector_t sector, struct rb_node **ret_parent, + struct rb_node ***rb_link) +{ + struct rb_node **p, *parent; + struct cfq_queue *cfqq = NULL; + + parent = NULL; + p = &root->rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + cfqq = rb_entry(parent, struct cfq_queue, p_node); + + /* + * Sort strictly based on sector. Smallest to the left, + * largest to the right. + */ + if (sector > blk_rq_pos(cfqq->next_rq)) + n = &(*p)->rb_right; + else if (sector < blk_rq_pos(cfqq->next_rq)) + n = &(*p)->rb_left; + else + break; + p = n; + cfqq = NULL; + } + + *ret_parent = parent; + if (rb_link) + *rb_link = p; + return cfqq; +} + +static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct rb_node **p, *parent; + struct cfq_queue *__cfqq; + + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + + if (cfq_class_idle(cfqq)) + return; + if (!cfqq->next_rq) + return; + + cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; + __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, + blk_rq_pos(cfqq->next_rq), &parent, &p); + if (!__cfqq) { + rb_link_node(&cfqq->p_node, parent, p); + rb_insert_color(&cfqq->p_node, cfqq->p_root); + } else + cfqq->p_root = NULL; +} + +/* + * Update cfqq's position in the service tree. + */ +static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + /* + * Resorting requires the cfqq to be on the RR list already. 
+ */ + if (cfq_cfqq_on_rr(cfqq)) { + cfq_service_tree_add(cfqd, cfqq, 0); + cfq_prio_tree_add(cfqd, cfqq); + } +} + +/* + * add to busy list of queues for service, trying to be fair in ordering + * the pending list according to last request service + */ +static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); + BUG_ON(cfq_cfqq_on_rr(cfqq)); + cfq_mark_cfqq_on_rr(cfqq); + cfqd->busy_queues++; + if (cfq_cfqq_sync(cfqq)) + cfqd->busy_sync_queues++; + + cfq_resort_rr_list(cfqd, cfqq); +} + +/* + * Called when the cfqq no longer has requests pending, remove it from + * the service tree. + */ +static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); + BUG_ON(!cfq_cfqq_on_rr(cfqq)); + cfq_clear_cfqq_on_rr(cfqq); + + if (!RB_EMPTY_NODE(&cfqq->rb_node)) { + cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); + cfqq->service_tree = NULL; + } + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + + cfq_group_notify_queue_del(cfqd, cfqq->cfqg); + BUG_ON(!cfqd->busy_queues); + cfqd->busy_queues--; + if (cfq_cfqq_sync(cfqq)) + cfqd->busy_sync_queues--; +} + +/* + * rb tree support functions + */ +static void cfq_del_rq_rb(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + const int sync = rq_is_sync(rq); + + BUG_ON(!cfqq->queued[sync]); + cfqq->queued[sync]--; + + elv_rb_del(&cfqq->sort_list, rq); + + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { + /* + * Queue will be deleted from service tree when we actually + * expire it later. Right now just remove it from prio tree + * as it is empty. + */ + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + } +} + +static void cfq_add_rq_rb(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + struct cfq_data *cfqd = cfqq->cfqd; + struct request *prev; + + cfqq->queued[rq_is_sync(rq)]++; + + elv_rb_add(&cfqq->sort_list, rq); + + if (!cfq_cfqq_on_rr(cfqq)) + cfq_add_cfqq_rr(cfqd, cfqq); + + /* + * check if this request is a better next-serve candidate + */ + prev = cfqq->next_rq; + cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); + + /* + * adjust priority tree position, if ->next_rq changes + */ + if (prev != cfqq->next_rq) + cfq_prio_tree_add(cfqd, cfqq); + + BUG_ON(!cfqq->next_rq); +} + +static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) +{ + elv_rb_del(&cfqq->sort_list, rq); + cfqq->queued[rq_is_sync(rq)]--; + cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); + cfq_add_rq_rb(rq); + cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, + rq->cmd_flags); +} + +static struct request * +cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) +{ + struct task_struct *tsk = current; + struct cfq_io_cq *cic; + struct cfq_queue *cfqq; + + cic = cfq_cic_lookup(cfqd, tsk->io_context); + if (!cic) + return NULL; + + cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); + if (cfqq) + return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); + + return NULL; +} + +static void cfq_activate_request(struct request_queue *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + + cfqd->rq_in_driver++; + cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", + cfqd->rq_in_driver); + + cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); +} + +static void cfq_deactivate_request(struct request_queue *q, struct request *rq) +{ + struct cfq_data 
*cfqd = q->elevator->elevator_data; + + WARN_ON(!cfqd->rq_in_driver); + cfqd->rq_in_driver--; + cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", + cfqd->rq_in_driver); +} + +static void cfq_remove_request(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + if (cfqq->next_rq == rq) + cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); + + list_del_init(&rq->queuelist); + cfq_del_rq_rb(rq); + + cfqq->cfqd->rq_queued--; + cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); + if (rq->cmd_flags & REQ_PRIO) { + WARN_ON(!cfqq->prio_pending); + cfqq->prio_pending--; + } +} + +static int cfq_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct request *__rq; + + __rq = cfq_find_rq_fmerge(cfqd, bio); + if (__rq && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void cfq_merged_request(struct request_queue *q, struct request *req, + int type) +{ + if (type == ELEVATOR_FRONT_MERGE) { + struct cfq_queue *cfqq = RQ_CFQQ(req); + + cfq_reposition_rq_rb(cfqq, req); + } +} + +static void cfq_bio_merged(struct request_queue *q, struct request *req, + struct bio *bio) +{ + cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw); +} + +static void +cfq_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + struct cfq_data *cfqd = q->elevator->elevator_data; + + /* + * reposition in fifo if next is older than rq + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(next->fifo_time, rq->fifo_time) && + cfqq == RQ_CFQQ(next)) { + list_move(&rq->queuelist, &next->queuelist); + rq->fifo_time = next->fifo_time; + } + + if (cfqq->next_rq == next) + cfqq->next_rq = rq; + cfq_remove_request(next); + cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags); + + cfqq = RQ_CFQQ(next); + /* + * all requests of this queue are merged to other queues, delete it + * from the service tree. If it's the active_queue, + * cfq_dispatch_requests() will choose to expire it or do idle + */ + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && + cfqq != cfqd->active_queue) + cfq_del_cfqq_rr(cfqd, cfqq); +} + +static int cfq_allow_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_io_cq *cic; + struct cfq_queue *cfqq; + + /* + * Disallow merge of a sync bio into an async request. + */ + if (cfq_bio_sync(bio) && !rq_is_sync(rq)) + return false; + + /* + * Lookup the cfqq that this bio will be queued with and allow + * merge only if rq is queued there. 
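+ *
+ * Otherwise a bio from one process could be merged into a request
+ * owned by a different cfqq and effectively ride on that queue's
+ * time slice.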
+ */ + cic = cfq_cic_lookup(cfqd, current->io_context); + if (!cic) + return false; + + cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); + return cfqq == RQ_CFQQ(rq); +} + +static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + del_timer(&cfqd->idle_slice_timer); + cfqg_stats_update_idle_time(cfqq->cfqg); +} + +static void __cfq_set_active_queue(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + if (cfqq) { + cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", + cfqd->serving_wl_class, cfqd->serving_wl_type); + cfqg_stats_update_avg_queue_size(cfqq->cfqg); + cfqq->slice_start = 0; + cfqq->dispatch_start = jiffies; + cfqq->allocated_slice = 0; + cfqq->slice_end = 0; + cfqq->slice_dispatch = 0; + cfqq->nr_sectors = 0; + + cfq_clear_cfqq_wait_request(cfqq); + cfq_clear_cfqq_must_dispatch(cfqq); + cfq_clear_cfqq_must_alloc_slice(cfqq); + cfq_clear_cfqq_fifo_expire(cfqq); + cfq_mark_cfqq_slice_new(cfqq); + + cfq_del_timer(cfqd, cfqq); + } + + cfqd->active_queue = cfqq; +} + +/* + * current cfqq expired its slice (or was too idle), select new one + */ +static void +__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, + bool timed_out) +{ + cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); + + if (cfq_cfqq_wait_request(cfqq)) + cfq_del_timer(cfqd, cfqq); + + cfq_clear_cfqq_wait_request(cfqq); + cfq_clear_cfqq_wait_busy(cfqq); + + /* + * If this cfqq is shared between multiple processes, check to + * make sure that those processes are still issuing I/Os within + * the mean seek distance. If not, it may be time to break the + * queues apart again. + */ + if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) + cfq_mark_cfqq_split_coop(cfqq); + + /* + * store what was left of this slice, if the queue idled/timed out + */ + if (timed_out) { + if (cfq_cfqq_slice_new(cfqq)) + cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); + else + cfqq->slice_resid = cfqq->slice_end - jiffies; + cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); + } + + cfq_group_served(cfqd, cfqq->cfqg, cfqq); + + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) + cfq_del_cfqq_rr(cfqd, cfqq); + + cfq_resort_rr_list(cfqd, cfqq); + + if (cfqq == cfqd->active_queue) + cfqd->active_queue = NULL; + + if (cfqd->active_cic) { + put_io_context(cfqd->active_cic->icq.ioc); + cfqd->active_cic = NULL; + } +} + +static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfqq) + __cfq_slice_expired(cfqd, cfqq, timed_out); +} + +/* + * Get next queue for service. Unless we have a queue preemption, + * we'll simply select the first cfqq in the service tree. + */ +static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) +{ + struct cfq_rb_root *st = st_for(cfqd->serving_group, + cfqd->serving_wl_class, cfqd->serving_wl_type); + + if (!cfqd->rq_queued) + return NULL; + + /* There is nothing to dispatch */ + if (!st) + return NULL; + if (RB_EMPTY_ROOT(&st->rb)) + return NULL; + return cfq_rb_first(st); +} + +static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) +{ + struct cfq_group *cfqg; + struct cfq_queue *cfqq; + int i, j; + struct cfq_rb_root *st; + + if (!cfqd->rq_queued) + return NULL; + + cfqg = cfq_get_next_cfqg(cfqd); + if (!cfqg) + return NULL; + + for_each_cfqg_st(cfqg, i, j, st) + if ((cfqq = cfq_rb_first(st)) != NULL) + return cfqq; + return NULL; +} + +/* + * Get and set a new active queue for service. 
+ */ +static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + if (!cfqq) + cfqq = cfq_get_next_queue(cfqd); + + __cfq_set_active_queue(cfqd, cfqq); + return cfqq; +} + +static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, + struct request *rq) +{ + if (blk_rq_pos(rq) >= cfqd->last_position) + return blk_rq_pos(rq) - cfqd->last_position; + else + return cfqd->last_position - blk_rq_pos(rq); +} + +static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) +{ + return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; +} + +static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, + struct cfq_queue *cur_cfqq) +{ + struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; + struct rb_node *parent, *node; + struct cfq_queue *__cfqq; + sector_t sector = cfqd->last_position; + + if (RB_EMPTY_ROOT(root)) + return NULL; + + /* + * First, if we find a request starting at the end of the last + * request, choose it. + */ + __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); + if (__cfqq) + return __cfqq; + + /* + * If the exact sector wasn't found, the parent of the NULL leaf + * will contain the closest sector. + */ + __cfqq = rb_entry(parent, struct cfq_queue, p_node); + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) + return __cfqq; + + if (blk_rq_pos(__cfqq->next_rq) < sector) + node = rb_next(&__cfqq->p_node); + else + node = rb_prev(&__cfqq->p_node); + if (!node) + return NULL; + + __cfqq = rb_entry(node, struct cfq_queue, p_node); + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) + return __cfqq; + + return NULL; +} + +/* + * cfqd - obvious + * cur_cfqq - passed in so that we don't decide that the current queue is + * closely cooperating with itself. + * + * So, basically we're assuming that that cur_cfqq has dispatched at least + * one request, and that cfqd->last_position reflects a position on the disk + * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid + * assumption. + */ +static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, + struct cfq_queue *cur_cfqq) +{ + struct cfq_queue *cfqq; + + if (cfq_class_idle(cur_cfqq)) + return NULL; + if (!cfq_cfqq_sync(cur_cfqq)) + return NULL; + if (CFQQ_SEEKY(cur_cfqq)) + return NULL; + + /* + * Don't search priority tree if it's the only queue in the group. + */ + if (cur_cfqq->cfqg->nr_cfqq == 1) + return NULL; + + /* + * We should notice if some of the queues are cooperating, eg + * working closely on the same area of the disk. In that case, + * we can group them together and don't waste time idling. + */ + cfqq = cfqq_close(cfqd, cur_cfqq); + if (!cfqq) + return NULL; + + /* If new queue belongs to different cfq_group, don't choose it */ + if (cur_cfqq->cfqg != cfqq->cfqg) + return NULL; + + /* + * It only makes sense to merge sync queues. + */ + if (!cfq_cfqq_sync(cfqq)) + return NULL; + if (CFQQ_SEEKY(cfqq)) + return NULL; + + /* + * Do not merge queues of different priority classes + */ + if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) + return NULL; + + return cfqq; +} + +/* + * Determine whether we should enforce idle window for this queue. + */ + +static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + enum wl_class_t wl_class = cfqq_class(cfqq); + struct cfq_rb_root *st = cfqq->service_tree; + + BUG_ON(!st); + BUG_ON(!st->count); + + if (!cfqd->cfq_slice_idle) + return false; + + /* We never do for idle class queues. 
*/ + if (wl_class == IDLE_WORKLOAD) + return false; + + /* We do for queues that were marked with idle window flag. */ + if (cfq_cfqq_idle_window(cfqq) && + !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) + return true; + + /* + * Otherwise, we do only if they are the last ones + * in their service tree. + */ + if (st->count == 1 && cfq_cfqq_sync(cfqq) && + !cfq_io_thinktime_big(cfqd, &st->ttime, false)) + return true; + cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); + return false; +} + +static void cfq_arm_slice_timer(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + struct cfq_io_cq *cic; + unsigned long sl, group_idle = 0; + + /* + * SSD device without seek penalty, disable idling. But only do so + * for devices that support queuing, otherwise we still have a problem + * with sync vs async workloads. + */ + if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) + return; + + WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); + WARN_ON(cfq_cfqq_slice_new(cfqq)); + + /* + * idle is disabled, either manually or by past process history + */ + if (!cfq_should_idle(cfqd, cfqq)) { + /* no queue idling. Check for group idling */ + if (cfqd->cfq_group_idle) + group_idle = cfqd->cfq_group_idle; + else + return; + } + + /* + * still active requests from this queue, don't idle + */ + if (cfqq->dispatched) + return; + + /* + * task has exited, don't wait + */ + cic = cfqd->active_cic; + if (!cic || !atomic_read(&cic->icq.ioc->active_ref)) + return; + + /* + * If our average think time is larger than the remaining time + * slice, then don't idle. This avoids overrunning the allotted + * time slice. + */ + if (sample_valid(cic->ttime.ttime_samples) && + (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) { + cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", + cic->ttime.ttime_mean); + return; + } + + /* There are other queues in the group, don't do group idle */ + if (group_idle && cfqq->cfqg->nr_cfqq > 1) + return; + + cfq_mark_cfqq_wait_request(cfqq); + + if (group_idle) + sl = cfqd->cfq_group_idle; + else + sl = cfqd->cfq_slice_idle; + + mod_timer(&cfqd->idle_slice_timer, jiffies + sl); + cfqg_stats_set_start_idle_time(cfqq->cfqg); + cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, + group_idle ? 1 : 0); +} + +/* + * Move request from internal lists to the request queue dispatch list. 
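+ *
+ * Besides handing the request to the elevator dispatch list, this
+ * updates the per-queue and per-group dispatched counts (which the
+ * idling and wait-busy logic consults) plus the in-flight and sector
+ * statistics.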
+ */ +static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); + + cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); + cfq_remove_request(rq); + cfqq->dispatched++; + (RQ_CFQG(rq))->dispatched++; + elv_dispatch_sort(q, rq); + + cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; + cfqq->nr_sectors += blk_rq_sectors(rq); + cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags); +} + +/* + * return expired entry, or NULL to just start from scratch in rbtree + */ +static struct request *cfq_check_fifo(struct cfq_queue *cfqq) +{ + struct request *rq = NULL; + + if (cfq_cfqq_fifo_expire(cfqq)) + return NULL; + + cfq_mark_cfqq_fifo_expire(cfqq); + + if (list_empty(&cfqq->fifo)) + return NULL; + + rq = rq_entry_fifo(cfqq->fifo.next); + if (time_before(jiffies, rq->fifo_time)) + rq = NULL; + + cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + return rq; +} + +static inline int +cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + const int base_rq = cfqd->cfq_slice_async_rq; + + WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); + + return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio); +} + +/* + * Must be called with the queue_lock held. + */ +static int cfqq_process_refs(struct cfq_queue *cfqq) +{ + int process_refs, io_refs; + + io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; + process_refs = cfqq->ref - io_refs; + BUG_ON(process_refs < 0); + return process_refs; +} + +static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) +{ + int process_refs, new_process_refs; + struct cfq_queue *__cfqq; + + /* + * If there are no process references on the new_cfqq, then it is + * unsafe to follow the ->new_cfqq chain as other cfqq's in the + * chain may have dropped their last reference (not just their + * last process reference). + */ + if (!cfqq_process_refs(new_cfqq)) + return; + + /* Avoid a circular list and skip interim queue merges */ + while ((__cfqq = new_cfqq->new_cfqq)) { + if (__cfqq == cfqq) + return; + new_cfqq = __cfqq; + } + + process_refs = cfqq_process_refs(cfqq); + new_process_refs = cfqq_process_refs(new_cfqq); + /* + * If the process for the cfqq has gone away, there is no + * sense in merging the queues. + */ + if (process_refs == 0 || new_process_refs == 0) + return; + + /* + * Merge in the direction of the lesser amount of work. + */ + if (new_process_refs >= process_refs) { + cfqq->new_cfqq = new_cfqq; + new_cfqq->ref += process_refs; + } else { + new_cfqq->new_cfqq = cfqq; + cfqq->ref += new_process_refs; + } +} + +static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, + struct cfq_group *cfqg, enum wl_class_t wl_class) +{ + struct cfq_queue *queue; + int i; + bool key_valid = false; + unsigned long lowest_key = 0; + enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; + + for (i = 0; i <= SYNC_WORKLOAD; ++i) { + /* select the one with lowest rb_key */ + queue = cfq_rb_first(st_for(cfqg, wl_class, i)); + if (queue && + (!key_valid || time_before(queue->rb_key, lowest_key))) { + lowest_key = queue->rb_key; + cur_best = i; + key_valid = true; + } + } + + return cur_best; +} + +static void +choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + unsigned slice; + unsigned count; + struct cfq_rb_root *st; + unsigned group_slice; + enum wl_class_t original_class = cfqd->serving_wl_class; + + /* Choose next priority. 
RT > BE > IDLE */ + if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) + cfqd->serving_wl_class = RT_WORKLOAD; + else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) + cfqd->serving_wl_class = BE_WORKLOAD; + else { + cfqd->serving_wl_class = IDLE_WORKLOAD; + cfqd->workload_expires = jiffies + 1; + return; + } + + if (original_class != cfqd->serving_wl_class) + goto new_workload; + + /* + * For RT and BE, we have to choose also the type + * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload + * expiration time + */ + st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); + count = st->count; + + /* + * check workload expiration, and that we still have other queues ready + */ + if (count && !time_after(jiffies, cfqd->workload_expires)) + return; + +new_workload: + /* otherwise select new workload type */ + cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, + cfqd->serving_wl_class); + st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); + count = st->count; + + /* + * the workload slice is computed as a fraction of target latency + * proportional to the number of queues in that workload, over + * all the queues in the same priority class + */ + group_slice = cfq_group_slice(cfqd, cfqg); + + slice = group_slice * count / + max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], + cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, + cfqg)); + + if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { + unsigned int tmp; + + /* + * Async queues are currently system wide. Just taking + * proportion of queues with-in same group will lead to higher + * async ratio system wide as generally root group is going + * to have higher weight. A more accurate thing would be to + * calculate system wide asnc/sync ratio. + */ + tmp = cfqd->cfq_target_latency * + cfqg_busy_async_queues(cfqd, cfqg); + tmp = tmp/cfqd->busy_queues; + slice = min_t(unsigned, slice, tmp); + + /* async workload slice is scaled down according to + * the sync/async slice ratio. */ + slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; + } else + /* sync workload slice is at least 2 * cfq_slice_idle */ + slice = max(slice, 2 * cfqd->cfq_slice_idle); + + slice = max_t(unsigned, slice, CFQ_MIN_TT); + cfq_log(cfqd, "workload slice:%d", slice); + cfqd->workload_expires = jiffies + slice; +} + +static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + struct cfq_group *cfqg; + + if (RB_EMPTY_ROOT(&st->rb)) + return NULL; + cfqg = cfq_rb_first_group(st); + update_min_vdisktime(st); + return cfqg; +} + +static void cfq_choose_cfqg(struct cfq_data *cfqd) +{ + struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); + + cfqd->serving_group = cfqg; + + /* Restore the workload type data */ + if (cfqg->saved_wl_slice) { + cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; + cfqd->serving_wl_type = cfqg->saved_wl_type; + cfqd->serving_wl_class = cfqg->saved_wl_class; + } else + cfqd->workload_expires = jiffies - 1; + + choose_wl_class_and_type(cfqd, cfqg); +} + +/* + * Select a queue for service. If we have a current active queue, + * check whether to continue servicing it, or retrieve and set a new one. + */ +static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq, *new_cfqq = NULL; + + cfqq = cfqd->active_queue; + if (!cfqq) + goto new_queue; + + if (!cfqd->rq_queued) + return NULL; + + /* + * We were waiting for group to get backlogged. 
Expire the queue + */ + if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) + goto expire; + + /* + * The active queue has run out of time, expire it and select new. + */ + if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { + /* + * If slice had not expired at the completion of last request + * we might not have turned on wait_busy flag. Don't expire + * the queue yet. Allow the group to get backlogged. + * + * The very fact that we have used the slice, that means we + * have been idling all along on this queue and it should be + * ok to wait for this request to complete. + */ + if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) + && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } else + goto check_group_idle; + } + + /* + * The active queue has requests and isn't expired, allow it to + * dispatch. + */ + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) + goto keep_queue; + + /* + * If another queue has a request waiting within our mean seek + * distance, let it run. The expire code will check for close + * cooperators and put the close queue at the front of the service + * tree. If possible, merge the expiring queue with the new cfqq. + */ + new_cfqq = cfq_close_cooperator(cfqd, cfqq); + if (new_cfqq) { + if (!cfqq->new_cfqq) + cfq_setup_merge(cfqq, new_cfqq); + goto expire; + } + + /* + * No requests pending. If the active queue still has requests in + * flight or is idling for a new request, allow either of these + * conditions to happen (or time out) before selecting a new queue. + */ + if (timer_pending(&cfqd->idle_slice_timer)) { + cfqq = NULL; + goto keep_queue; + } + + /* + * This is a deep seek queue, but the device is much faster than + * the queue can deliver, don't idle + **/ + if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && + (cfq_cfqq_slice_new(cfqq) || + (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) { + cfq_clear_cfqq_deep(cfqq); + cfq_clear_cfqq_idle_window(cfqq); + } + + if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } + + /* + * If group idle is enabled and there are requests dispatched from + * this group, wait for requests to complete. + */ +check_group_idle: + if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && + cfqq->cfqg->dispatched && + !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { + cfqq = NULL; + goto keep_queue; + } + +expire: + cfq_slice_expired(cfqd, 0); +new_queue: + /* + * Current queue expired. Check if we have to switch to a new + * service tree + */ + if (!new_cfqq) + cfq_choose_cfqg(cfqd); + + cfqq = cfq_set_active_queue(cfqd, new_cfqq); +keep_queue: + return cfqq; +} + +static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) +{ + int dispatched = 0; + + while (cfqq->next_rq) { + cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); + dispatched++; + } + + BUG_ON(!list_empty(&cfqq->fifo)); + + /* By default cfqq is not expired if it is empty. Do it explicitly */ + __cfq_slice_expired(cfqq->cfqd, cfqq, 0); + return dispatched; +} + +/* + * Drain our current requests. Used for barriers and when switching + * io schedulers on-the-fly. 
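+ *
+ * This expires the active slice first and then walks every remaining
+ * busy queue via cfq_get_next_queue_forced(), pushing all of their
+ * requests straight to the dispatch list; by the end no busy queues
+ * may remain (see the BUG_ON below).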
+ */ +static int cfq_forced_dispatch(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq; + int dispatched = 0; + + /* Expire the timeslice of the current active queue first */ + cfq_slice_expired(cfqd, 0); + while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { + __cfq_set_active_queue(cfqd, cfqq); + dispatched += __cfq_forced_dispatch_cfqq(cfqq); + } + + BUG_ON(cfqd->busy_queues); + + cfq_log(cfqd, "forced_dispatch=%d", dispatched); + return dispatched; +} + +static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + /* the queue hasn't finished any request, can't estimate */ + if (cfq_cfqq_slice_new(cfqq)) + return true; + if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, + cfqq->slice_end)) + return true; + + return false; +} + +static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + unsigned int max_dispatch; + + /* + * Drain async requests before we start sync IO + */ + if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) + return false; + + /* + * If this is an async queue and we have sync IO in flight, let it wait + */ + if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) + return false; + + max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); + if (cfq_class_idle(cfqq)) + max_dispatch = 1; + + /* + * Does this cfqq already have too much IO in flight? + */ + if (cfqq->dispatched >= max_dispatch) { + bool promote_sync = false; + /* + * idle queue must always only have a single IO in flight + */ + if (cfq_class_idle(cfqq)) + return false; + + /* + * If there is only one sync queue + * we can ignore async queue here and give the sync + * queue no dispatch limit. The reason is a sync queue can + * preempt async queue, limiting the sync queue doesn't make + * sense. This is useful for aiostress test. + */ + if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) + promote_sync = true; + + /* + * We have other queues, don't allow more IO from this one + */ + if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && + !promote_sync) + return false; + + /* + * Sole queue user, no limit + */ + if (cfqd->busy_queues == 1 || promote_sync) + max_dispatch = -1; + else + /* + * Normally we start throttling cfqq when cfq_quantum/2 + * requests have been dispatched. But we can drive + * deeper queue depths at the beginning of slice + * subjected to upper limit of cfq_quantum. + * */ + max_dispatch = cfqd->cfq_quantum; + } + + /* + * Async queues must wait a bit before being allowed dispatch. + * We also ramp up the dispatch depth gradually for async IO, + * based on the last sync IO we serviced + */ + if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { + unsigned long last_sync = jiffies - cfqd->last_delayed_sync; + unsigned int depth; + + depth = last_sync / cfqd->cfq_slice[1]; + if (!depth && !cfqq->dispatched) + depth = 1; + if (depth < max_dispatch) + max_dispatch = depth; + } + + /* + * If we're below the current max, allow a dispatch + */ + return cfqq->dispatched < max_dispatch; +} + +/* + * Dispatch a request from cfqq, moving them to the request queue + * dispatch list. 
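+ *
+ * To make cfq_may_dispatch() above concrete (illustrative figures,
+ * assuming the usual cfq_quantum default of 8): a queue is normally
+ * capped at quantum/2 = 4 requests in flight; the cap is lifted when
+ * it is the only busy queue (or the only sync queue competing with
+ * async ones), and early in a fresh slice the depth may be driven up
+ * to the full quantum of 8.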
+ */ +static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct request *rq; + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + + if (!cfq_may_dispatch(cfqd, cfqq)) + return false; + + /* + * follow expired path, else get first next available + */ + rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; + + /* + * insert request into driver dispatch list + */ + cfq_dispatch_insert(cfqd->queue, rq); + + if (!cfqd->active_cic) { + struct cfq_io_cq *cic = RQ_CIC(rq); + + atomic_long_inc(&cic->icq.ioc->refcount); + cfqd->active_cic = cic; + } + + return true; +} + +/* + * Find the cfqq that we need to service and move a request from that to the + * dispatch list + */ +static int cfq_dispatch_requests(struct request_queue *q, int force) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq; + + if (!cfqd->busy_queues) + return 0; + + if (unlikely(force)) + return cfq_forced_dispatch(cfqd); + + cfqq = cfq_select_queue(cfqd); + if (!cfqq) + return 0; + + /* + * Dispatch a request from this cfqq, if it is allowed + */ + if (!cfq_dispatch_request(cfqd, cfqq)) + return 0; + + cfqq->slice_dispatch++; + cfq_clear_cfqq_must_dispatch(cfqq); + + /* + * expire an async queue immediately if it has used up its slice. idle + * queue always expire after 1 dispatch round. + */ + if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && + cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || + cfq_class_idle(cfqq))) { + cfqq->slice_end = jiffies + 1; + cfq_slice_expired(cfqd, 0); + } + + cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); + return 1; +} + +/* + * task holds one reference to the queue, dropped when task exits. each rq + * in-flight on this queue also holds a reference, dropped when rq is freed. + * + * Each cfq queue took a reference on the parent group. Drop it now. + * queue lock must be held here. + */ +static void cfq_put_queue(struct cfq_queue *cfqq) +{ + struct cfq_data *cfqd = cfqq->cfqd; + struct cfq_group *cfqg; + + BUG_ON(cfqq->ref <= 0); + + cfqq->ref--; + if (cfqq->ref) + return; + + cfq_log_cfqq(cfqd, cfqq, "put_queue"); + BUG_ON(rb_first(&cfqq->sort_list)); + BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); + cfqg = cfqq->cfqg; + + if (unlikely(cfqd->active_queue == cfqq)) { + __cfq_slice_expired(cfqd, cfqq, 0); + cfq_schedule_dispatch(cfqd); + } + + BUG_ON(cfq_cfqq_on_rr(cfqq)); + kmem_cache_free(cfq_pool, cfqq); + cfqg_put(cfqg); +} + +static void cfq_put_cooperator(struct cfq_queue *cfqq) +{ + struct cfq_queue *__cfqq, *next; + + /* + * If this queue was scheduled to merge with another queue, be + * sure to drop the reference taken on that queue (and others in + * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs. 
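+ *
+ * The loop below also defends against a corrupted ->new_cfqq chain:
+ * if it ever points back at the queue being torn down we warn once
+ * and stop rather than loop forever.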
+ */ + __cfqq = cfqq->new_cfqq; + while (__cfqq) { + if (__cfqq == cfqq) { + WARN(1, "cfqq->new_cfqq loop detected\n"); + break; + } + next = __cfqq->new_cfqq; + cfq_put_queue(__cfqq); + __cfqq = next; + } +} + +static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + if (unlikely(cfqq == cfqd->active_queue)) { + __cfq_slice_expired(cfqd, cfqq, 0); + cfq_schedule_dispatch(cfqd); + } + + cfq_put_cooperator(cfqq); + + cfq_put_queue(cfqq); +} + +static void cfq_init_icq(struct io_cq *icq) +{ + struct cfq_io_cq *cic = icq_to_cic(icq); + + cic->ttime.last_end_request = jiffies; +} + +static void cfq_exit_icq(struct io_cq *icq) +{ + struct cfq_io_cq *cic = icq_to_cic(icq); + struct cfq_data *cfqd = cic_to_cfqd(cic); + + if (cic->cfqq[BLK_RW_ASYNC]) { + cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); + cic->cfqq[BLK_RW_ASYNC] = NULL; + } + + if (cic->cfqq[BLK_RW_SYNC]) { + cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); + cic->cfqq[BLK_RW_SYNC] = NULL; + } +} + +static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic) +{ + struct task_struct *tsk = current; + int ioprio_class; + + if (!cfq_cfqq_prio_changed(cfqq)) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, inherit CPU scheduling settings + */ + cfqq->ioprio = task_nice_ioprio(tsk); + cfqq->ioprio_class = task_nice_ioclass(tsk); + break; + case IOPRIO_CLASS_RT: + cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); + cfqq->ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; + cfqq->ioprio = 7; + cfq_clear_cfqq_idle_window(cfqq); + break; + } + + /* + * keep track of original prio settings in case we have to temporarily + * elevate the priority of this queue + */ + cfqq->org_ioprio = cfqq->ioprio; + cfq_clear_cfqq_prio_changed(cfqq); +} + +static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio) +{ + int ioprio = cic->icq.ioc->ioprio; + struct cfq_data *cfqd = cic_to_cfqd(cic); + struct cfq_queue *cfqq; + + /* + * Check whether ioprio has changed. The condition may trigger + * spuriously on a newly created cic but there's no harm. 
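+ *
+ * When the priority does change, the async queue is replaced with a
+ * freshly allocated one at the new priority right away, while the
+ * sync queue is only marked prio_changed and picks up the new values
+ * via cfq_init_prio_data() on its next request.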
+ */ + if (unlikely(!cfqd) || likely(cic->ioprio == ioprio)) + return; + + cfqq = cic->cfqq[BLK_RW_ASYNC]; + if (cfqq) { + struct cfq_queue *new_cfqq; + new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, + GFP_ATOMIC); + if (new_cfqq) { + cic->cfqq[BLK_RW_ASYNC] = new_cfqq; + cfq_put_queue(cfqq); + } + } + + cfqq = cic->cfqq[BLK_RW_SYNC]; + if (cfqq) + cfq_mark_cfqq_prio_changed(cfqq); + + cic->ioprio = ioprio; +} + +static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, + pid_t pid, bool is_sync) +{ + RB_CLEAR_NODE(&cfqq->rb_node); + RB_CLEAR_NODE(&cfqq->p_node); + INIT_LIST_HEAD(&cfqq->fifo); + + cfqq->ref = 0; + cfqq->cfqd = cfqd; + + cfq_mark_cfqq_prio_changed(cfqq); + + if (is_sync) { + if (!cfq_class_idle(cfqq)) + cfq_mark_cfqq_idle_window(cfqq); + cfq_mark_cfqq_sync(cfqq); + } + cfqq->pid = pid; +} + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) +{ + struct cfq_data *cfqd = cic_to_cfqd(cic); + struct cfq_queue *sync_cfqq; + uint64_t serial_nr; + + rcu_read_lock(); + serial_nr = bio_blkcg(bio)->css.serial_nr; + rcu_read_unlock(); + + /* + * Check whether blkcg has changed. The condition may trigger + * spuriously on a newly created cic but there's no harm. + */ + if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr)) + return; + + sync_cfqq = cic_to_cfqq(cic, 1); + if (sync_cfqq) { + /* + * Drop reference to sync queue. A new sync queue will be + * assigned in new group upon arrival of a fresh request. + */ + cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); + cic_set_cfqq(cic, NULL, 1); + cfq_put_queue(sync_cfqq); + } + + cic->blkcg_serial_nr = serial_nr; +} +#else +static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { } +#endif /* CONFIG_CFQ_GROUP_IOSCHED */ + +static struct cfq_queue * +cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, + struct bio *bio, gfp_t gfp_mask) +{ + struct blkcg *blkcg; + struct cfq_queue *cfqq, *new_cfqq = NULL; + struct cfq_group *cfqg; + +retry: + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); + if (!cfqg) { + cfqq = &cfqd->oom_cfqq; + goto out; + } + + cfqq = cic_to_cfqq(cic, is_sync); + + /* + * Always try a new alloc if we fell back to the OOM cfqq + * originally, since it should just be a temporary situation. 
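+ *
+ * For blocking (__GFP_WAIT) allocations the queue lock is dropped
+ * around the slab allocation and, if it succeeds, the whole lookup is
+ * retried since the cgroup state may have changed in the meantime;
+ * non-blocking callers simply end up on the oom_cfqq when the
+ * allocation fails.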
+ */ + if (!cfqq || cfqq == &cfqd->oom_cfqq) { + cfqq = NULL; + if (new_cfqq) { + cfqq = new_cfqq; + new_cfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + rcu_read_unlock(); + spin_unlock_irq(cfqd->queue->queue_lock); + new_cfqq = kmem_cache_alloc_node(cfq_pool, + gfp_mask | __GFP_ZERO, + cfqd->queue->node); + spin_lock_irq(cfqd->queue->queue_lock); + if (new_cfqq) + goto retry; + else + return &cfqd->oom_cfqq; + } else { + cfqq = kmem_cache_alloc_node(cfq_pool, + gfp_mask | __GFP_ZERO, + cfqd->queue->node); + } + + if (cfqq) { + cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); + cfq_init_prio_data(cfqq, cic); + cfq_link_cfqq_cfqg(cfqq, cfqg); + cfq_log_cfqq(cfqd, cfqq, "alloced"); + } else + cfqq = &cfqd->oom_cfqq; + } +out: + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + + rcu_read_unlock(); + return cfqq; +} + +static struct cfq_queue ** +cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) +{ + switch (ioprio_class) { + case IOPRIO_CLASS_RT: + return &cfqd->async_cfqq[0][ioprio]; + case IOPRIO_CLASS_NONE: + ioprio = IOPRIO_NORM; + /* fall through */ + case IOPRIO_CLASS_BE: + return &cfqd->async_cfqq[1][ioprio]; + case IOPRIO_CLASS_IDLE: + return &cfqd->async_idle_cfqq; + default: + BUG(); + } +} + +static struct cfq_queue * +cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, + struct bio *bio, gfp_t gfp_mask) +{ + int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); + int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); + struct cfq_queue **async_cfqq = NULL; + struct cfq_queue *cfqq = NULL; + + if (!is_sync) { + if (!ioprio_valid(cic->ioprio)) { + struct task_struct *tsk = current; + ioprio = task_nice_ioprio(tsk); + ioprio_class = task_nice_ioclass(tsk); + } + async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); + cfqq = *async_cfqq; + } + + if (!cfqq) + cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); + + /* + * pin the queue now that it's allocated, scheduler exit will prune it + */ + if (!is_sync && !(*async_cfqq)) { + cfqq->ref++; + *async_cfqq = cfqq; + } + + cfqq->ref++; + return cfqq; +} + +static void +__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle) +{ + unsigned long elapsed = jiffies - ttime->last_end_request; + elapsed = min(elapsed, 2UL * slice_idle); + + ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; + ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8; + ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples; +} + +static void +cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_io_cq *cic) +{ + if (cfq_cfqq_sync(cfqq)) { + __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); + __cfq_update_io_thinktime(&cfqq->service_tree->ttime, + cfqd->cfq_slice_idle); + } +#ifdef CONFIG_CFQ_GROUP_IOSCHED + __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); +#endif +} + +static void +cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) +{ + sector_t sdist = 0; + sector_t n_sec = blk_rq_sectors(rq); + if (cfqq->last_request_pos) { + if (cfqq->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - cfqq->last_request_pos; + else + sdist = cfqq->last_request_pos - blk_rq_pos(rq); + } + + cfqq->seek_history <<= 1; + if (blk_queue_nonrot(cfqd->queue)) + cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT); + else + cfqq->seek_history |= (sdist > CFQQ_SEEK_THR); +} + +/* + * Disable idle window if the process thinks too long or seeks so much that + * 
it doesn't matter + */ +static void +cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_io_cq *cic) +{ + int old_idle, enable_idle; + + /* + * Don't idle for async or idle io prio class + */ + if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) + return; + + enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); + + if (cfqq->queued[0] + cfqq->queued[1] >= 4) + cfq_mark_cfqq_deep(cfqq); + + if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE)) + enable_idle = 0; + else if (!atomic_read(&cic->icq.ioc->active_ref) || + !cfqd->cfq_slice_idle || + (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) + enable_idle = 0; + else if (sample_valid(cic->ttime.ttime_samples)) { + if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) + enable_idle = 0; + else + enable_idle = 1; + } + + if (old_idle != enable_idle) { + cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); + if (enable_idle) + cfq_mark_cfqq_idle_window(cfqq); + else + cfq_clear_cfqq_idle_window(cfqq); + } +} + +/* + * Check if new_cfqq should preempt the currently active queue. Return 0 for + * no or if we aren't sure, a 1 will cause a preempt. + */ +static bool +cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + struct request *rq) +{ + struct cfq_queue *cfqq; + + cfqq = cfqd->active_queue; + if (!cfqq) + return false; + + if (cfq_class_idle(new_cfqq)) + return false; + + if (cfq_class_idle(cfqq)) + return true; + + /* + * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. + */ + if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq)) + return false; + + /* + * if the new request is sync, but the currently running queue is + * not, let the sync request have priority. + */ + if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) + return true; + + if (new_cfqq->cfqg != cfqq->cfqg) + return false; + + if (cfq_slice_used(cfqq)) + return true; + + /* Allow preemption only if we are idling on sync-noidle tree */ + if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD && + cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && + new_cfqq->service_tree->count == 2 && + RB_EMPTY_ROOT(&cfqq->sort_list)) + return true; + + /* + * So both queues are sync. Let the new request get disk time if + * it's a metadata request and the current queue is doing regular IO. + */ + if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending) + return true; + + /* + * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. + */ + if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) + return true; + + /* An idle queue should not be idle now for some reason */ + if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) + return true; + + if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) + return false; + + /* + * if this request is as-good as one we would expect from the + * current cfqq, let it preempt + */ + if (cfq_rq_close(cfqd, cfqq, rq)) + return true; + + return false; +} + +/* + * cfqq preempts the active queue. if we allowed preempt with no slice left, + * let it have half of its nominal slice. + */ +static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + enum wl_type_t old_type = cfqq_type(cfqd->active_queue); + + cfq_log_cfqq(cfqd, cfqq, "preempt"); + cfq_slice_expired(cfqd, 1); + + /* + * workload type is changed, don't save slice, otherwise preempt + * doesn't happen + */ + if (old_type != cfqq_type(cfqq)) + cfqq->cfqg->saved_wl_slice = 0; + + /* + * Put the new queue at the front of the of the current list, + * so we know that it will be selected next. 
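+ *
+ * "Front" here means calling cfq_service_tree_add() with add_front
+ * set, which keys the queue just below the current leftmost entry of
+ * the service tree, so the next cfq_select_queue() pass picks it up
+ * first.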
+ */ + BUG_ON(!cfq_cfqq_on_rr(cfqq)); + + cfq_service_tree_add(cfqd, cfqq, 1); + + cfqq->slice_end = 0; + cfq_mark_cfqq_slice_new(cfqq); +} + +/* + * Called when a new fs request (rq) is added (to cfqq). Check if there's + * something we should do about it + */ +static void +cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) +{ + struct cfq_io_cq *cic = RQ_CIC(rq); + + cfqd->rq_queued++; + if (rq->cmd_flags & REQ_PRIO) + cfqq->prio_pending++; + + cfq_update_io_thinktime(cfqd, cfqq, cic); + cfq_update_io_seektime(cfqd, cfqq, rq); + cfq_update_idle_window(cfqd, cfqq, cic); + + cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + + if (cfqq == cfqd->active_queue) { + /* + * Remember that we saw a request from this process, but + * don't start queuing just yet. Otherwise we risk seeing lots + * of tiny requests, because we disrupt the normal plugging + * and merging. If the request is already larger than a single + * page, let it rip immediately. For that case we assume that + * merging is already done. Ditto for a busy system that + * has other work pending, don't risk delaying until the + * idle timer unplug to continue working. + */ + if (cfq_cfqq_wait_request(cfqq)) { + if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || + cfqd->busy_queues > 1) { + cfq_del_timer(cfqd, cfqq); + cfq_clear_cfqq_wait_request(cfqq); + __blk_run_queue(cfqd->queue); + } else { + cfqg_stats_update_idle_time(cfqq->cfqg); + cfq_mark_cfqq_must_dispatch(cfqq); + } + } + } else if (cfq_should_preempt(cfqd, cfqq, rq)) { + /* + * not the active queue - expire current slice if it is + * idle and has expired it's mean thinktime or this new queue + * has some old slice time left and is of higher priority or + * this new queue is RT and the current one is BE + */ + cfq_preempt_queue(cfqd, cfqq); + __blk_run_queue(cfqd->queue); + } +} + +static void cfq_insert_request(struct request_queue *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + cfq_log_cfqq(cfqd, cfqq, "insert_request"); + cfq_init_prio_data(cfqq, RQ_CIC(rq)); + + rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; + list_add_tail(&rq->queuelist, &cfqq->fifo); + cfq_add_rq_rb(rq); + cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, + rq->cmd_flags); + cfq_rq_enqueued(cfqd, cfqq, rq); +} + +/* + * Update hw_tag based on peak queue depth over 50 samples under + * sufficient load. + */ +static void cfq_update_hw_tag(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth) + cfqd->hw_tag_est_depth = cfqd->rq_in_driver; + + if (cfqd->hw_tag == 1) + return; + + if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && + cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) + return; + + /* + * If active queue hasn't enough requests and can idle, cfq might not + * dispatch sufficient requests to hardware. 
Don't zero hw_tag in this + * case + */ + if (cfqq && cfq_cfqq_idle_window(cfqq) && + cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < + CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN) + return; + + if (cfqd->hw_tag_samples++ < 50) + return; + + if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) + cfqd->hw_tag = 1; + else + cfqd->hw_tag = 0; +} + +static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct cfq_io_cq *cic = cfqd->active_cic; + + /* If the queue already has requests, don't wait */ + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) + return false; + + /* If there are other queues in the group, don't wait */ + if (cfqq->cfqg->nr_cfqq > 1) + return false; + + /* the only queue in the group, but think time is big */ + if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) + return false; + + if (cfq_slice_used(cfqq)) + return true; + + /* if slice left is less than think time, wait busy */ + if (cic && sample_valid(cic->ttime.ttime_samples) + && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) + return true; + + /* + * If think times is less than a jiffy than ttime_mean=0 and above + * will not be true. It might happen that slice has not expired yet + * but will expire soon (4-5 ns) during select_queue(). To cover the + * case where think time is less than a jiffy, mark the queue wait + * busy if only 1 jiffy is left in the slice. + */ + if (cfqq->slice_end - jiffies == 1) + return true; + + return false; +} + +static void cfq_completed_request(struct request_queue *q, struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + struct cfq_data *cfqd = cfqq->cfqd; + const int sync = rq_is_sync(rq); + unsigned long now; + + now = jiffies; + cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", + !!(rq->cmd_flags & REQ_NOIDLE)); + + cfq_update_hw_tag(cfqd); + + WARN_ON(!cfqd->rq_in_driver); + WARN_ON(!cfqq->dispatched); + cfqd->rq_in_driver--; + cfqq->dispatched--; + (RQ_CFQG(rq))->dispatched--; + cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), + rq_io_start_time_ns(rq), rq->cmd_flags); + + cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; + + if (sync) { + struct cfq_rb_root *st; + + RQ_CIC(rq)->ttime.last_end_request = now; + + if (cfq_cfqq_on_rr(cfqq)) + st = cfqq->service_tree; + else + st = st_for(cfqq->cfqg, cfqq_class(cfqq), + cfqq_type(cfqq)); + + st->ttime.last_end_request = now; + if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) + cfqd->last_delayed_sync = now; + } + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + cfqq->cfqg->ttime.last_end_request = now; +#endif + + /* + * If this is the active queue, check if it needs to be expired, + * or if we want to idle in case it has no pending requests. + */ + if (cfqd->active_queue == cfqq) { + const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list); + + if (cfq_cfqq_slice_new(cfqq)) { + cfq_set_prio_slice(cfqd, cfqq); + cfq_clear_cfqq_slice_new(cfqq); + } + + /* + * Should we wait for next request to come in before we expire + * the queue. 
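cfq_should_wait_busy() above compares the time left in the slice with the mean think time that __cfq_update_io_thinktime() maintains: a 7/8-weighted moving average kept in fixed point (samples and totals are scaled by 256 so the integer divisions keep precision, and the +128 rounds the mean to nearest). A minimal stand-alone sketch of the same arithmetic, with jiffies replaced by plain numbers:

#include <stdio.h>

/* Mirrors the moving-average fields of struct cfq_ttime. */
struct ttime_model {
	unsigned long samples;	/* ttime_samples */
	unsigned long total;	/* ttime_total   */
	unsigned long mean;	/* ttime_mean    */
};

/* Same update rule as __cfq_update_io_thinktime(): old state decays by 7/8,
 * the new sample contributes 1/8, and samples are clamped to 2 * slice_idle. */
static void update_thinktime(struct ttime_model *t, unsigned long elapsed,
			     unsigned long slice_idle)
{
	if (elapsed > 2 * slice_idle)
		elapsed = 2 * slice_idle;

	t->samples = (7 * t->samples + 256) / 8;
	t->total   = (7 * t->total + 256 * elapsed) / 8;
	t->mean    = (t->total + 128) / t->samples;
}

int main(void)
{
	struct ttime_model t = { 0, 0, 0 };
	unsigned long slice_idle = 8;	/* e.g. 8 jiffies; value chosen for the example */

	/* A process that consistently thinks for ~3 jiffies between requests. */
	for (int i = 0; i < 20; i++)
		update_thinktime(&t, 3, slice_idle);

	printf("mean think time ~= %lu jiffies\n", t.mean);
	return 0;
}

With a converged mean of about 3 jiffies, the think-time branch of cfq_should_wait_busy() fires only while slice_end - jiffies is smaller than that mean, i.e. when the slice is about to end anyway and extending it briefly is cheaper than giving up the disk.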
+ */ + if (cfq_should_wait_busy(cfqd, cfqq)) { + unsigned long extend_sl = cfqd->cfq_slice_idle; + if (!cfqd->cfq_slice_idle) + extend_sl = cfqd->cfq_group_idle; + cfqq->slice_end = jiffies + extend_sl; + cfq_mark_cfqq_wait_busy(cfqq); + cfq_log_cfqq(cfqd, cfqq, "will busy wait"); + } + + /* + * Idling is not enabled on: + * - expired queues + * - idle-priority queues + * - async queues + * - queues with still some requests queued + * - when there is a close cooperator + */ + if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) + cfq_slice_expired(cfqd, 1); + else if (sync && cfqq_empty && + !cfq_close_cooperator(cfqd, cfqq)) { + cfq_arm_slice_timer(cfqd); + } + } + + if (!cfqd->rq_in_driver) + cfq_schedule_dispatch(cfqd); +} + +static inline int __cfq_may_queue(struct cfq_queue *cfqq) +{ + if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { + cfq_mark_cfqq_must_alloc_slice(cfqq); + return ELV_MQUEUE_MUST; + } + + return ELV_MQUEUE_MAY; +} + +static int cfq_may_queue(struct request_queue *q, int rw) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; + struct cfq_io_cq *cic; + struct cfq_queue *cfqq; + + /* + * don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be queued. + * so just lookup a possibly existing queue, or return 'may queue' + * if that fails + */ + cic = cfq_cic_lookup(cfqd, tsk->io_context); + if (!cic) + return ELV_MQUEUE_MAY; + + cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); + if (cfqq) { + cfq_init_prio_data(cfqq, cic); + + return __cfq_may_queue(cfqq); + } + + return ELV_MQUEUE_MAY; +} + +/* + * queue lock held here + */ +static void cfq_put_request(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + if (cfqq) { + const int rw = rq_data_dir(rq); + + BUG_ON(!cfqq->allocated[rw]); + cfqq->allocated[rw]--; + + /* Put down rq reference on cfqg */ + cfqg_put(RQ_CFQG(rq)); + rq->elv.priv[0] = NULL; + rq->elv.priv[1] = NULL; + + cfq_put_queue(cfqq); + } +} + +static struct cfq_queue * +cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, + struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); + cic_set_cfqq(cic, cfqq->new_cfqq, 1); + cfq_mark_cfqq_coop(cfqq->new_cfqq); + cfq_put_queue(cfqq); + return cic_to_cfqq(cic, 1); +} + +/* + * Returns NULL if a new cfqq should be allocated, or the old cfqq if this + * was the last process referring to said cfqq. + */ +static struct cfq_queue * +split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq) +{ + if (cfqq_process_refs(cfqq) == 1) { + cfqq->pid = current->pid; + cfq_clear_cfqq_coop(cfqq); + cfq_clear_cfqq_split_coop(cfqq); + return cfqq; + } + + cic_set_cfqq(cic, NULL, 1); + + cfq_put_cooperator(cfqq); + + cfq_put_queue(cfqq); + return NULL; +} +/* + * Allocate cfq data structures associated with this request. 
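cfq_get_queue() and cfq_async_queue_prio() above unpack cic->ioprio with IOPRIO_PRIO_CLASS()/IOPRIO_PRIO_DATA(), which pack the scheduling class and the priority level into one integer. The constants below are quoted from include/linux/ioprio.h from memory and should be checked against the header this patch targets; the program itself is just a worked example of the unpacking.

#include <stdio.h>

/* Assumed to match include/linux/ioprio.h. */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)

#define IOPRIO_PRIO_CLASS(m)	((m) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(m)	((m) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

int main(void)
{
	/* Roughly what "ionice -c2 -n4" hands to the kernel ... */
	unsigned int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	/* ... and what cfq_get_queue() recovers before indexing
	 * async_cfqq[1][4], the shared best-effort async queue. */
	printf("class=%lu data=%lu\n",
	       (unsigned long)IOPRIO_PRIO_CLASS(ioprio),
	       (unsigned long)IOPRIO_PRIO_DATA(ioprio));
	return 0;
}

Because async queues are looked up through this table rather than through the io_context, every task at a given class and level shares one async cfq_queue, which is why cfq_get_queue() pins it with an extra reference that only scheduler exit drops.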
+ */ +static int +cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio, + gfp_t gfp_mask) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq); + const int rw = rq_data_dir(rq); + const bool is_sync = rq_is_sync(rq); + struct cfq_queue *cfqq; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + spin_lock_irq(q->queue_lock); + + check_ioprio_changed(cic, bio); + check_blkcg_changed(cic, bio); +new_queue: + cfqq = cic_to_cfqq(cic, is_sync); + if (!cfqq || cfqq == &cfqd->oom_cfqq) { + cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask); + cic_set_cfqq(cic, cfqq, is_sync); + } else { + /* + * If the queue was seeky for too long, break it apart. + */ + if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) { + cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); + cfqq = split_cfqq(cic, cfqq); + if (!cfqq) + goto new_queue; + } + + /* + * Check to see if this queue is scheduled to merge with + * another, closely cooperating queue. The merging of + * queues happens here as it must be done in process context. + * The reference on new_cfqq was taken in merge_cfqqs. + */ + if (cfqq->new_cfqq) + cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); + } + + cfqq->allocated[rw]++; + + cfqq->ref++; + cfqg_get(cfqq->cfqg); + rq->elv.priv[0] = cfqq; + rq->elv.priv[1] = cfqq->cfqg; + spin_unlock_irq(q->queue_lock); + return 0; +} + +static void cfq_kick_queue(struct work_struct *work) +{ + struct cfq_data *cfqd = + container_of(work, struct cfq_data, unplug_work); + struct request_queue *q = cfqd->queue; + + spin_lock_irq(q->queue_lock); + __blk_run_queue(cfqd->queue); + spin_unlock_irq(q->queue_lock); +} + +/* + * Timer running if the active_queue is currently idling inside its time slice + */ +static void cfq_idle_slice_timer(unsigned long data) +{ + struct cfq_data *cfqd = (struct cfq_data *) data; + struct cfq_queue *cfqq; + unsigned long flags; + int timed_out = 1; + + cfq_log(cfqd, "idle timer fired"); + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + cfqq = cfqd->active_queue; + if (cfqq) { + timed_out = 0; + + /* + * We saw a request before the queue expired, let it through + */ + if (cfq_cfqq_must_dispatch(cfqq)) + goto out_kick; + + /* + * expired + */ + if (cfq_slice_used(cfqq)) + goto expire; + + /* + * only expire and reinvoke request handler, if there are + * other queues with pending requests + */ + if (!cfqd->busy_queues) + goto out_cont; + + /* + * not expired and it has a request pending, let it dispatch + */ + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) + goto out_kick; + + /* + * Queue depth flag is reset only when the idle didn't succeed + */ + cfq_clear_cfqq_deep(cfqq); + } +expire: + cfq_slice_expired(cfqd, timed_out); +out_kick: + cfq_schedule_dispatch(cfqd); +out_cont: + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) +{ + del_timer_sync(&cfqd->idle_slice_timer); + cancel_work_sync(&cfqd->unplug_work); +} + +static void cfq_put_async_queues(struct cfq_data *cfqd) +{ + int i; + + for (i = 0; i < IOPRIO_BE_NR; i++) { + if (cfqd->async_cfqq[0][i]) + cfq_put_queue(cfqd->async_cfqq[0][i]); + if (cfqd->async_cfqq[1][i]) + cfq_put_queue(cfqd->async_cfqq[1][i]); + } + + if (cfqd->async_idle_cfqq) + cfq_put_queue(cfqd->async_idle_cfqq); +} + +static void cfq_exit_queue(struct elevator_queue *e) +{ + struct cfq_data *cfqd = e->elevator_data; + struct request_queue *q = cfqd->queue; + + cfq_shutdown_timer_wq(cfqd); + + spin_lock_irq(q->queue_lock); + + if 
(cfqd->active_queue) + __cfq_slice_expired(cfqd, cfqd->active_queue, 0); + + cfq_put_async_queues(cfqd); + + spin_unlock_irq(q->queue_lock); + + cfq_shutdown_timer_wq(cfqd); + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + blkcg_deactivate_policy(q, &blkcg_policy_cfq); +#else + kfree(cfqd->root_group); +#endif + kfree(cfqd); +} + +static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct cfq_data *cfqd; + struct blkcg_gq *blkg __maybe_unused; + int i, ret; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); + if (!cfqd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = cfqd; + + cfqd->queue = q; + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + + /* Init root service tree */ + cfqd->grp_service_tree = CFQ_RB_ROOT; + + /* Init root group and prefer root group over other groups by default */ +#ifdef CONFIG_CFQ_GROUP_IOSCHED + ret = blkcg_activate_policy(q, &blkcg_policy_cfq); + if (ret) + goto out_free; + + cfqd->root_group = blkg_to_cfqg(q->root_blkg); +#else + ret = -ENOMEM; + cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group), + GFP_KERNEL, cfqd->queue->node); + if (!cfqd->root_group) + goto out_free; + + cfq_init_cfqg_base(cfqd->root_group); +#endif + cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT; + cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT; + + /* + * Not strictly needed (since RB_ROOT just clears the node and we + * zeroed cfqd on alloc), but better be safe in case someone decides + * to add magic to the rb code + */ + for (i = 0; i < CFQ_PRIO_LISTS; i++) + cfqd->prio_trees[i] = RB_ROOT; + + /* + * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. + * Grab a permanent reference to it, so that the normal code flow + * will not attempt to free it. oom_cfqq is linked to root_group + * but shouldn't hold a reference as it'll never be unlinked. Lose + * the reference from linking right away. + */ + cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); + cfqd->oom_cfqq.ref++; + + spin_lock_irq(q->queue_lock); + cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group); + cfqg_put(cfqd->root_group); + spin_unlock_irq(q->queue_lock); + + init_timer(&cfqd->idle_slice_timer); + cfqd->idle_slice_timer.function = cfq_idle_slice_timer; + cfqd->idle_slice_timer.data = (unsigned long) cfqd; + + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); + + cfqd->cfq_quantum = cfq_quantum; + cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; + cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; + cfqd->cfq_back_max = cfq_back_max; + cfqd->cfq_back_penalty = cfq_back_penalty; + cfqd->cfq_slice[0] = cfq_slice_async; + cfqd->cfq_slice[1] = cfq_slice_sync; + cfqd->cfq_target_latency = cfq_target_latency; + cfqd->cfq_slice_async_rq = cfq_slice_async_rq; + cfqd->cfq_slice_idle = cfq_slice_idle; + cfqd->cfq_group_idle = cfq_group_idle; + cfqd->cfq_latency = 1; + cfqd->hw_tag = -1; + /* + * we optimistically start assuming sync ops weren't delayed in last + * second, in order to have larger depth for async operations. 
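The oom_cfqq initialised in cfq_init_queue() above is a small but important pattern: a queue embedded in cfq_data itself, given an artificial reference at init time so the normal put path can never free it, and handed out by cfq_find_alloc_queue() whenever a real allocation fails. A reduced, self-contained sketch of the pattern (all names invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct queue {
	int ref;
};

struct sched_data {
	struct queue oom_q;		/* embedded fallback, never freed */
};

static struct queue *get_queue(struct sched_data *sd, int simulate_oom)
{
	struct queue *q = simulate_oom ? NULL : calloc(1, sizeof(*q));

	if (!q)
		q = &sd->oom_q;		/* degraded but still functional, like oom_cfqq */
	q->ref++;			/* caller's reference */
	return q;
}

static void put_queue(struct queue *q)
{
	/* The embedded fallback keeps its init-time reference, so it never
	 * reaches zero here; only dynamically allocated queues are freed. */
	if (--q->ref == 0)
		free(q);
}

int main(void)
{
	struct sched_data sd = { .oom_q = { .ref = 1 } };	/* permanent ref */
	struct queue *q = get_queue(&sd, 1);			/* allocation "fails" */

	put_queue(q);
	printf("fallback ref after put: %d\n", sd.oom_q.ref);	/* still 1 */
	return 0;
}

This is also why the init code links oom_cfqq into the root group and then immediately drops the group reference taken by that link: the embedded queue must not keep root_group pinned, since it will never be unlinked.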
+ */ + cfqd->last_delayed_sync = jiffies - HZ; + return 0; + +out_free: + kfree(cfqd); + kobject_put(&eq->kobj); + return ret; +} + +/* + * sysfs parts below --> + */ +static ssize_t +cfq_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%u\n", var); +} + +static ssize_t +cfq_var_store(unsigned int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtoul(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct cfq_data *cfqd = e->elevator_data; \ + unsigned int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return cfq_var_show(__data, (page)); \ +} +SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); +SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); +SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); +SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); +SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); +SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); +SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); +SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); +SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); +SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); +SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); +SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct cfq_data *cfqd = e->elevator_data; \ + unsigned int __data; \ + int ret = cfq_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, + UINT_MAX, 1); +STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, + UINT_MAX, 1); +STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); +STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, + UINT_MAX, 0); +STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, + UINT_MAX, 0); +STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); +STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1); +#undef STORE_FUNCTION + +#define CFQ_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) + +static struct elv_fs_entry cfq_attrs[] = { + CFQ_ATTR(quantum), + CFQ_ATTR(fifo_expire_sync), + CFQ_ATTR(fifo_expire_async), + CFQ_ATTR(back_seek_max), + CFQ_ATTR(back_seek_penalty), + CFQ_ATTR(slice_sync), + CFQ_ATTR(slice_async), + CFQ_ATTR(slice_async_rq), + CFQ_ATTR(slice_idle), + CFQ_ATTR(group_idle), + CFQ_ATTR(low_latency), + CFQ_ATTR(target_latency), + __ATTR_NULL +}; + +static struct elevator_type iosched_cfq = { + .ops = { + 
.elevator_merge_fn = cfq_merge, + .elevator_merged_fn = cfq_merged_request, + .elevator_merge_req_fn = cfq_merged_requests, + .elevator_allow_merge_fn = cfq_allow_merge, + .elevator_bio_merged_fn = cfq_bio_merged, + .elevator_dispatch_fn = cfq_dispatch_requests, + .elevator_add_req_fn = cfq_insert_request, + .elevator_activate_req_fn = cfq_activate_request, + .elevator_deactivate_req_fn = cfq_deactivate_request, + .elevator_completed_req_fn = cfq_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_icq_fn = cfq_init_icq, + .elevator_exit_icq_fn = cfq_exit_icq, + .elevator_set_req_fn = cfq_set_request, + .elevator_put_req_fn = cfq_put_request, + .elevator_may_queue_fn = cfq_may_queue, + .elevator_init_fn = cfq_init_queue, + .elevator_exit_fn = cfq_exit_queue, + }, + .icq_size = sizeof(struct cfq_io_cq), + .icq_align = __alignof__(struct cfq_io_cq), + .elevator_attrs = cfq_attrs, + .elevator_name = "cfq", + .elevator_owner = THIS_MODULE, +}; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static struct blkcg_policy blkcg_policy_cfq = { + .pd_size = sizeof(struct cfq_group), + .cftypes = cfq_blkcg_files, + + .pd_init_fn = cfq_pd_init, + .pd_offline_fn = cfq_pd_offline, + .pd_reset_stats_fn = cfq_pd_reset_stats, +}; +#endif + +static int __init cfq_init(void) +{ + int ret; + + /* + * could be 0 on HZ < 1000 setups + */ + if (!cfq_slice_async) + cfq_slice_async = 1; + if (!cfq_slice_idle) + cfq_slice_idle = 1; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (!cfq_group_idle) + cfq_group_idle = 1; + + ret = blkcg_policy_register(&blkcg_policy_cfq); + if (ret) + return ret; +#else + cfq_group_idle = 0; +#endif + + ret = -ENOMEM; + cfq_pool = KMEM_CACHE(cfq_queue, 0); + if (!cfq_pool) + goto err_pol_unreg; + + ret = elv_register(&iosched_cfq); + if (ret) + goto err_free_pool; + + return 0; + +err_free_pool: + kmem_cache_destroy(cfq_pool); +err_pol_unreg: +#ifdef CONFIG_CFQ_GROUP_IOSCHED + blkcg_policy_unregister(&blkcg_policy_cfq); +#endif + return ret; +} + +static void __exit cfq_exit(void) +{ +#ifdef CONFIG_CFQ_GROUP_IOSCHED + blkcg_policy_unregister(&blkcg_policy_cfq); +#endif + elv_unregister(&iosched_cfq); + kmem_cache_destroy(cfq_pool); +} + +module_init(cfq_init); +module_exit(cfq_exit); + +MODULE_AUTHOR("Jens Axboe"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler"); diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c new file mode 100644 index 000000000..9dbc67e42 --- /dev/null +++ b/block/cmdline-parser.c @@ -0,0 +1,254 @@ +/* + * Parse command line, get partition information + * + * Written by Cai Zhiyong + * + */ +#include +#include + +static int parse_subpart(struct cmdline_subpart **subpart, char *partdef) +{ + int ret = 0; + struct cmdline_subpart *new_subpart; + + *subpart = NULL; + + new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL); + if (!new_subpart) + return -ENOMEM; + + if (*partdef == '-') { + new_subpart->size = (sector_t)(~0ULL); + partdef++; + } else { + new_subpart->size = (sector_t)memparse(partdef, &partdef); + if (new_subpart->size < (sector_t)PAGE_SIZE) { + pr_warn("cmdline partition size is invalid."); + ret = -EINVAL; + goto fail; + } + } + + if (*partdef == '@') { + partdef++; + new_subpart->from = (sector_t)memparse(partdef, &partdef); + } else { + new_subpart->from = (sector_t)(~0ULL); + } + + if (*partdef == '(') { + int length; + char *next = strchr(++partdef, ')'); + + if (!next) { + pr_warn("cmdline partition format is invalid."); + 
ret = -EINVAL; + goto fail; + } + + length = min_t(int, next - partdef, + sizeof(new_subpart->name) - 1); + strncpy(new_subpart->name, partdef, length); + new_subpart->name[length] = '\0'; + + partdef = ++next; + } else + new_subpart->name[0] = '\0'; + + new_subpart->flags = 0; + + if (!strncmp(partdef, "ro", 2)) { + new_subpart->flags |= PF_RDONLY; + partdef += 2; + } + + if (!strncmp(partdef, "lk", 2)) { + new_subpart->flags |= PF_POWERUP_LOCK; + partdef += 2; + } + + *subpart = new_subpart; + return 0; +fail: + kfree(new_subpart); + return ret; +} + +static void free_subpart(struct cmdline_parts *parts) +{ + struct cmdline_subpart *subpart; + + while (parts->subpart) { + subpart = parts->subpart; + parts->subpart = subpart->next_subpart; + kfree(subpart); + } +} + +static int parse_parts(struct cmdline_parts **parts, const char *bdevdef) +{ + int ret = -EINVAL; + char *next; + int length; + struct cmdline_subpart **next_subpart; + struct cmdline_parts *newparts; + char buf[BDEVNAME_SIZE + 32 + 4]; + + *parts = NULL; + + newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL); + if (!newparts) + return -ENOMEM; + + next = strchr(bdevdef, ':'); + if (!next) { + pr_warn("cmdline partition has no block device."); + goto fail; + } + + length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1); + strncpy(newparts->name, bdevdef, length); + newparts->name[length] = '\0'; + newparts->nr_subparts = 0; + + next_subpart = &newparts->subpart; + + while (next && *(++next)) { + bdevdef = next; + next = strchr(bdevdef, ','); + + length = (!next) ? (sizeof(buf) - 1) : + min_t(int, next - bdevdef, sizeof(buf) - 1); + + strncpy(buf, bdevdef, length); + buf[length] = '\0'; + + ret = parse_subpart(next_subpart, buf); + if (ret) + goto fail; + + newparts->nr_subparts++; + next_subpart = &(*next_subpart)->next_subpart; + } + + if (!newparts->subpart) { + pr_warn("cmdline partition has no valid partition."); + ret = -EINVAL; + goto fail; + } + + *parts = newparts; + + return 0; +fail: + free_subpart(newparts); + kfree(newparts); + return ret; +} + +void cmdline_parts_free(struct cmdline_parts **parts) +{ + struct cmdline_parts *next_parts; + + while (*parts) { + next_parts = (*parts)->next_parts; + free_subpart(*parts); + kfree(*parts); + *parts = next_parts; + } +} +EXPORT_SYMBOL(cmdline_parts_free); + +int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline) +{ + int ret; + char *buf; + char *pbuf; + char *next; + struct cmdline_parts **next_parts; + + *parts = NULL; + + next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + next_parts = parts; + + while (next && *pbuf) { + next = strchr(pbuf, ';'); + if (next) + *next = '\0'; + + ret = parse_parts(next_parts, pbuf); + if (ret) + goto fail; + + if (next) + pbuf = ++next; + + next_parts = &(*next_parts)->next_parts; + } + + if (!*parts) { + pr_warn("cmdline partition has no valid partition."); + ret = -EINVAL; + goto fail; + } + + ret = 0; +done: + kfree(buf); + return ret; + +fail: + cmdline_parts_free(parts); + goto done; +} +EXPORT_SYMBOL(cmdline_parts_parse); + +struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, + const char *bdev) +{ + while (parts && strncmp(bdev, parts->name, sizeof(parts->name))) + parts = parts->next_parts; + return parts; +} +EXPORT_SYMBOL(cmdline_parts_find); + +/* + * add_part() + * 0 success. + * 1 can not add so many partitions. 
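parse_subpart(), parse_parts() and cmdline_parts_parse() above implement the grammar used by the blkdevparts= boot parameter (wired up in block/partitions/cmdline.c): semicolons separate block devices, a colon separates a device name from its partition list, commas separate partitions, and each partition is <size>[@<offset>](name) with optional "ro"/"lk" suffixes and "-" meaning "the rest of the device". A worked example, with device and partition names chosen purely for illustration:

/*
 * blkdevparts=mmcblk0:1m(boot)ro,512k(env),-(userdata)
 *
 * cmdline_parts_parse() turns this into one cmdline_parts entry:
 *   name        = "mmcblk0"
 *   subpart[0]: size = 1m via memparse(), name = "boot", flags = PF_RDONLY
 *   subpart[1]: size = 512k,              name = "env"
 *   subpart[2]: size = ~0ULL ("-" = remainder), name = "userdata"
 *
 * cmdline_parts_set() below then lays the partitions out back to back
 * from sector 0, clamping the last one to whatever space is left,
 * unless an explicit @offset was given for a subpart.
 */
static const char example_cmdline[] =
	"mmcblk0:1m(boot)ro,512k(env),-(userdata)";

Note that parse_subpart() rejects any explicit size smaller than PAGE_SIZE, so "512" (bytes) would fail where "512k" succeeds.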
+ */ +int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, + int slot, + int (*add_part)(int, struct cmdline_subpart *, void *), + void *param) +{ + sector_t from = 0; + struct cmdline_subpart *subpart; + + for (subpart = parts->subpart; subpart; + subpart = subpart->next_subpart, slot++) { + if (subpart->from == (sector_t)(~0ULL)) + subpart->from = from; + else + from = subpart->from; + + if (from >= disk_size) + break; + + if (subpart->size > (disk_size - from)) + subpart->size = disk_size - from; + + from += subpart->size; + + if (add_part(slot, subpart, param)) + break; + } + + return slot; +} +EXPORT_SYMBOL(cmdline_parts_set); diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c new file mode 100644 index 000000000..f678c733d --- /dev/null +++ b/block/compat_ioctl.c @@ -0,0 +1,756 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int compat_put_ushort(unsigned long arg, unsigned short val) +{ + return put_user(val, (unsigned short __user *)compat_ptr(arg)); +} + +static int compat_put_int(unsigned long arg, int val) +{ + return put_user(val, (compat_int_t __user *)compat_ptr(arg)); +} + +static int compat_put_uint(unsigned long arg, unsigned int val) +{ + return put_user(val, (compat_uint_t __user *)compat_ptr(arg)); +} + +static int compat_put_long(unsigned long arg, long val) +{ + return put_user(val, (compat_long_t __user *)compat_ptr(arg)); +} + +static int compat_put_ulong(unsigned long arg, compat_ulong_t val) +{ + return put_user(val, (compat_ulong_t __user *)compat_ptr(arg)); +} + +static int compat_put_u64(unsigned long arg, u64 val) +{ + return put_user(val, (compat_u64 __user *)compat_ptr(arg)); +} + +struct compat_hd_geometry { + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + u32 start; +}; + +static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev, + struct compat_hd_geometry __user *ugeo) +{ + struct hd_geometry geo; + int ret; + + if (!ugeo) + return -EINVAL; + if (!disk->fops->getgeo) + return -ENOTTY; + + memset(&geo, 0, sizeof(geo)); + /* + * We need to set the startsect first, the driver may + * want to override it. 
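These compat_* helpers exist because a 32-bit process on a 64-bit kernel lays out ioctl argument structures differently: longs and pointers are 4 bytes on the caller's side but 8 in the kernel, so the "same" struct has a different size and different field offsets. Handlers like compat_hdio_getgeo() therefore copy the prefix that is layout-compatible and convert the differing fields by hand. A stand-alone illustration of the mismatch, with both layouts modelled rather than taken verbatim from the uapi headers:

#include <stdint.h>
#include <stdio.h>

struct geometry32 {		/* what a 32-bit caller builds (modelled) */
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	uint32_t start;		/* 4 bytes at offset 4 */
};

struct geometry64 {		/* what an LP64 kernel sees natively (modelled) */
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	unsigned long start;	/* 8 bytes, pushed to offset 8 by padding */
};

int main(void)
{
	printf("32-bit layout: %zu bytes, native layout: %zu bytes\n",
	       sizeof(struct geometry32), sizeof(struct geometry64));
	/* Only the first four bytes line up, which is exactly why the code
	 * above does copy_to_user(ugeo, &geo, 4) and then converts
	 * geo.start with a separate put_user(). */
	return 0;
}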
+ */ + geo.start = get_start_sect(bdev); + ret = disk->fops->getgeo(bdev, &geo); + if (ret) + return ret; + + ret = copy_to_user(ugeo, &geo, 4); + ret |= put_user(geo.start, &ugeo->start); + if (ret) + ret = -EFAULT; + + return ret; +} + +static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + mm_segment_t old_fs = get_fs(); + unsigned long kval; + unsigned int __user *uvp; + int error; + + set_fs(KERNEL_DS); + error = __blkdev_driver_ioctl(bdev, mode, + cmd, (unsigned long)(&kval)); + set_fs(old_fs); + + if (error == 0) { + uvp = compat_ptr(arg); + if (put_user(kval, uvp)) + error = -EFAULT; + } + return error; +} + +struct compat_cdrom_read_audio { + union cdrom_addr addr; + u8 addr_format; + compat_int_t nframes; + compat_caddr_t buf; +}; + +struct compat_cdrom_generic_command { + unsigned char cmd[CDROM_PACKET_SIZE]; + compat_caddr_t buffer; + compat_uint_t buflen; + compat_int_t stat; + compat_caddr_t sense; + unsigned char data_direction; + compat_int_t quiet; + compat_int_t timeout; + compat_caddr_t reserved[1]; +}; + +static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct cdrom_read_audio __user *cdread_audio; + struct compat_cdrom_read_audio __user *cdread_audio32; + __u32 data; + void __user *datap; + + cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio)); + cdread_audio32 = compat_ptr(arg); + + if (copy_in_user(&cdread_audio->addr, + &cdread_audio32->addr, + (sizeof(*cdread_audio32) - + sizeof(compat_caddr_t)))) + return -EFAULT; + + if (get_user(data, &cdread_audio32->buf)) + return -EFAULT; + datap = compat_ptr(data); + if (put_user(datap, &cdread_audio->buf)) + return -EFAULT; + + return __blkdev_driver_ioctl(bdev, mode, cmd, + (unsigned long)cdread_audio); +} + +static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct cdrom_generic_command __user *cgc; + struct compat_cdrom_generic_command __user *cgc32; + u32 data; + unsigned char dir; + int itmp; + + cgc = compat_alloc_user_space(sizeof(*cgc)); + cgc32 = compat_ptr(arg); + + if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) || + get_user(data, &cgc32->buffer) || + put_user(compat_ptr(data), &cgc->buffer) || + copy_in_user(&cgc->buflen, &cgc32->buflen, + (sizeof(unsigned int) + sizeof(int))) || + get_user(data, &cgc32->sense) || + put_user(compat_ptr(data), &cgc->sense) || + get_user(dir, &cgc32->data_direction) || + put_user(dir, &cgc->data_direction) || + get_user(itmp, &cgc32->quiet) || + put_user(itmp, &cgc->quiet) || + get_user(itmp, &cgc32->timeout) || + put_user(itmp, &cgc->timeout) || + get_user(data, &cgc32->reserved[0]) || + put_user(compat_ptr(data), &cgc->reserved[0])) + return -EFAULT; + + return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc); +} + +struct compat_blkpg_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_caddr_t data; +}; + +static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32) +{ + struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a)); + compat_caddr_t udata; + compat_int_t n; + int err; + + err = get_user(n, &ua32->op); + err |= put_user(n, &a->op); + err |= get_user(n, &ua32->flags); + err |= put_user(n, &a->flags); + err |= get_user(n, &ua32->datalen); + err |= put_user(n, &a->datalen); + err |= get_user(udata, &ua32->data); + err 
|= put_user(compat_ptr(udata), &a->data); + if (err) + return err; + + return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a); +} + +#define BLKBSZGET_32 _IOR(0x12, 112, int) +#define BLKBSZSET_32 _IOW(0x12, 113, int) +#define BLKGETSIZE64_32 _IOR(0x12, 114, int) + +struct compat_floppy_drive_params { + char cmos; + compat_ulong_t max_dtr; + compat_ulong_t hlt; + compat_ulong_t hut; + compat_ulong_t srt; + compat_ulong_t spinup; + compat_ulong_t spindown; + unsigned char spindown_offset; + unsigned char select_delay; + unsigned char rps; + unsigned char tracks; + compat_ulong_t timeout; + unsigned char interleave_sect; + struct floppy_max_errors max_errors; + char flags; + char read_track; + short autodetect[8]; + compat_int_t checkfreq; + compat_int_t native_format; +}; + +struct compat_floppy_drive_struct { + signed char flags; + compat_ulong_t spinup_date; + compat_ulong_t select_date; + compat_ulong_t first_read_date; + short probed_format; + short track; + short maxblock; + short maxtrack; + compat_int_t generation; + compat_int_t keep_data; + compat_int_t fd_ref; + compat_int_t fd_device; + compat_int_t last_checked; + compat_caddr_t dmabuf; + compat_int_t bufblocks; +}; + +struct compat_floppy_fdc_state { + compat_int_t spec1; + compat_int_t spec2; + compat_int_t dtr; + unsigned char version; + unsigned char dor; + compat_ulong_t address; + unsigned int rawcmd:2; + unsigned int reset:1; + unsigned int need_configure:1; + unsigned int perp_mode:2; + unsigned int has_fifo:1; + unsigned int driver_version; + unsigned char track[4]; +}; + +struct compat_floppy_write_errors { + unsigned int write_errors; + compat_ulong_t first_error_sector; + compat_int_t first_error_generation; + compat_ulong_t last_error_sector; + compat_int_t last_error_generation; + compat_uint_t badness; +}; + +#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct) +#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct) +#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params) +#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params) +#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct) +#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct) +#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state) +#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors) + +static struct { + unsigned int cmd32; + unsigned int cmd; +} fd_ioctl_trans_table[] = { + { FDSETPRM32, FDSETPRM }, + { FDDEFPRM32, FDDEFPRM }, + { FDGETPRM32, FDGETPRM }, + { FDSETDRVPRM32, FDSETDRVPRM }, + { FDGETDRVPRM32, FDGETDRVPRM }, + { FDGETDRVSTAT32, FDGETDRVSTAT }, + { FDPOLLDRVSTAT32, FDPOLLDRVSTAT }, + { FDGETFDCSTAT32, FDGETFDCSTAT }, + { FDWERRORGET32, FDWERRORGET } +}; + +#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table) + +static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + mm_segment_t old_fs = get_fs(); + void *karg = NULL; + unsigned int kcmd = 0; + int i, err; + + for (i = 0; i < NR_FD_IOCTL_TRANS; i++) + if (cmd == fd_ioctl_trans_table[i].cmd32) { + kcmd = fd_ioctl_trans_table[i].cmd; + break; + } + if (!kcmd) + return -EINVAL; + + switch (cmd) { + case FDSETPRM32: + case FDDEFPRM32: + case FDGETPRM32: + { + compat_uptr_t name; + struct compat_floppy_struct __user *uf; + struct floppy_struct *f; + + uf = compat_ptr(arg); + f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL); + if (!karg) + return -ENOMEM; + if (cmd == FDGETPRM32) + break; + err = 
__get_user(f->size, &uf->size); + err |= __get_user(f->sect, &uf->sect); + err |= __get_user(f->head, &uf->head); + err |= __get_user(f->track, &uf->track); + err |= __get_user(f->stretch, &uf->stretch); + err |= __get_user(f->gap, &uf->gap); + err |= __get_user(f->rate, &uf->rate); + err |= __get_user(f->spec1, &uf->spec1); + err |= __get_user(f->fmt_gap, &uf->fmt_gap); + err |= __get_user(name, &uf->name); + f->name = compat_ptr(name); + if (err) { + err = -EFAULT; + goto out; + } + break; + } + case FDSETDRVPRM32: + case FDGETDRVPRM32: + { + struct compat_floppy_drive_params __user *uf; + struct floppy_drive_params *f; + + uf = compat_ptr(arg); + f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL); + if (!karg) + return -ENOMEM; + if (cmd == FDGETDRVPRM32) + break; + err = __get_user(f->cmos, &uf->cmos); + err |= __get_user(f->max_dtr, &uf->max_dtr); + err |= __get_user(f->hlt, &uf->hlt); + err |= __get_user(f->hut, &uf->hut); + err |= __get_user(f->srt, &uf->srt); + err |= __get_user(f->spinup, &uf->spinup); + err |= __get_user(f->spindown, &uf->spindown); + err |= __get_user(f->spindown_offset, &uf->spindown_offset); + err |= __get_user(f->select_delay, &uf->select_delay); + err |= __get_user(f->rps, &uf->rps); + err |= __get_user(f->tracks, &uf->tracks); + err |= __get_user(f->timeout, &uf->timeout); + err |= __get_user(f->interleave_sect, &uf->interleave_sect); + err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors)); + err |= __get_user(f->flags, &uf->flags); + err |= __get_user(f->read_track, &uf->read_track); + err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect)); + err |= __get_user(f->checkfreq, &uf->checkfreq); + err |= __get_user(f->native_format, &uf->native_format); + if (err) { + err = -EFAULT; + goto out; + } + break; + } + case FDGETDRVSTAT32: + case FDPOLLDRVSTAT32: + karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL); + if (!karg) + return -ENOMEM; + break; + case FDGETFDCSTAT32: + karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL); + if (!karg) + return -ENOMEM; + break; + case FDWERRORGET32: + karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL); + if (!karg) + return -ENOMEM; + break; + default: + return -EINVAL; + } + set_fs(KERNEL_DS); + err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg); + set_fs(old_fs); + if (err) + goto out; + switch (cmd) { + case FDGETPRM32: + { + struct floppy_struct *f = karg; + struct compat_floppy_struct __user *uf = compat_ptr(arg); + + err = __put_user(f->size, &uf->size); + err |= __put_user(f->sect, &uf->sect); + err |= __put_user(f->head, &uf->head); + err |= __put_user(f->track, &uf->track); + err |= __put_user(f->stretch, &uf->stretch); + err |= __put_user(f->gap, &uf->gap); + err |= __put_user(f->rate, &uf->rate); + err |= __put_user(f->spec1, &uf->spec1); + err |= __put_user(f->fmt_gap, &uf->fmt_gap); + err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name); + break; + } + case FDGETDRVPRM32: + { + struct compat_floppy_drive_params __user *uf; + struct floppy_drive_params *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->cmos, &uf->cmos); + err |= __put_user(f->max_dtr, &uf->max_dtr); + err |= __put_user(f->hlt, &uf->hlt); + err |= __put_user(f->hut, &uf->hut); + err |= __put_user(f->srt, &uf->srt); + err |= __put_user(f->spinup, &uf->spinup); + err |= __put_user(f->spindown, &uf->spindown); + err |= __put_user(f->spindown_offset, &uf->spindown_offset); + err |= __put_user(f->select_delay, 
&uf->select_delay); + err |= __put_user(f->rps, &uf->rps); + err |= __put_user(f->tracks, &uf->tracks); + err |= __put_user(f->timeout, &uf->timeout); + err |= __put_user(f->interleave_sect, &uf->interleave_sect); + err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors)); + err |= __put_user(f->flags, &uf->flags); + err |= __put_user(f->read_track, &uf->read_track); + err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect)); + err |= __put_user(f->checkfreq, &uf->checkfreq); + err |= __put_user(f->native_format, &uf->native_format); + break; + } + case FDGETDRVSTAT32: + case FDPOLLDRVSTAT32: + { + struct compat_floppy_drive_struct __user *uf; + struct floppy_drive_struct *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->flags, &uf->flags); + err |= __put_user(f->spinup_date, &uf->spinup_date); + err |= __put_user(f->select_date, &uf->select_date); + err |= __put_user(f->first_read_date, &uf->first_read_date); + err |= __put_user(f->probed_format, &uf->probed_format); + err |= __put_user(f->track, &uf->track); + err |= __put_user(f->maxblock, &uf->maxblock); + err |= __put_user(f->maxtrack, &uf->maxtrack); + err |= __put_user(f->generation, &uf->generation); + err |= __put_user(f->keep_data, &uf->keep_data); + err |= __put_user(f->fd_ref, &uf->fd_ref); + err |= __put_user(f->fd_device, &uf->fd_device); + err |= __put_user(f->last_checked, &uf->last_checked); + err |= __put_user((u64)f->dmabuf, &uf->dmabuf); + err |= __put_user((u64)f->bufblocks, &uf->bufblocks); + break; + } + case FDGETFDCSTAT32: + { + struct compat_floppy_fdc_state __user *uf; + struct floppy_fdc_state *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->spec1, &uf->spec1); + err |= __put_user(f->spec2, &uf->spec2); + err |= __put_user(f->dtr, &uf->dtr); + err |= __put_user(f->version, &uf->version); + err |= __put_user(f->dor, &uf->dor); + err |= __put_user(f->address, &uf->address); + err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address), + (char *)&f->address + sizeof(f->address), sizeof(int)); + err |= __put_user(f->driver_version, &uf->driver_version); + err |= __copy_to_user(uf->track, f->track, sizeof(f->track)); + break; + } + case FDWERRORGET32: + { + struct compat_floppy_write_errors __user *uf; + struct floppy_write_errors *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->write_errors, &uf->write_errors); + err |= __put_user(f->first_error_sector, &uf->first_error_sector); + err |= __put_user(f->first_error_generation, &uf->first_error_generation); + err |= __put_user(f->last_error_sector, &uf->last_error_sector); + err |= __put_user(f->last_error_generation, &uf->last_error_generation); + err |= __put_user(f->badness, &uf->badness); + break; + } + default: + break; + } + if (err) + err = -EFAULT; + +out: + kfree(karg); + return err; +} + +static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + switch (cmd) { + case HDIO_GET_UNMASKINTR: + case HDIO_GET_MULTCOUNT: + case HDIO_GET_KEEPSETTINGS: + case HDIO_GET_32BIT: + case HDIO_GET_NOWERR: + case HDIO_GET_DMA: + case HDIO_GET_NICE: + case HDIO_GET_WCACHE: + case HDIO_GET_ACOUSTIC: + case HDIO_GET_ADDRESS: + case HDIO_GET_BUSSTATE: + return compat_hdio_ioctl(bdev, mode, cmd, arg); + case FDSETPRM32: + case FDDEFPRM32: + case FDGETPRM32: + case FDSETDRVPRM32: + case FDGETDRVPRM32: + case FDGETDRVSTAT32: + case FDPOLLDRVSTAT32: + case FDGETFDCSTAT32: + case FDWERRORGET32: + return compat_fd_ioctl(bdev, mode, cmd, arg); + 
case CDROMREADAUDIO: + return compat_cdrom_read_audio(bdev, mode, cmd, arg); + case CDROM_SEND_PACKET: + return compat_cdrom_generic_command(bdev, mode, cmd, arg); + + /* + * No handler required for the ones below, we just need to + * convert arg to a 64 bit pointer. + */ + case BLKSECTSET: + /* + * 0x03 -- HD/IDE ioctl's used by hdparm and friends. + * Some need translations, these do not. + */ + case HDIO_GET_IDENTITY: + case HDIO_DRIVE_TASK: + case HDIO_DRIVE_CMD: + /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */ + case 0x330: + /* 0x02 -- Floppy ioctls */ + case FDMSGON: + case FDMSGOFF: + case FDSETEMSGTRESH: + case FDFLUSH: + case FDWERRORCLR: + case FDSETMAXERRS: + case FDGETMAXERRS: + case FDGETDRVTYP: + case FDEJECT: + case FDCLRPRM: + case FDFMTBEG: + case FDFMTEND: + case FDRESET: + case FDTWADDLE: + case FDFMTTRK: + case FDRAWCMD: + /* CDROM stuff */ + case CDROMPAUSE: + case CDROMRESUME: + case CDROMPLAYMSF: + case CDROMPLAYTRKIND: + case CDROMREADTOCHDR: + case CDROMREADTOCENTRY: + case CDROMSTOP: + case CDROMSTART: + case CDROMEJECT: + case CDROMVOLCTRL: + case CDROMSUBCHNL: + case CDROMMULTISESSION: + case CDROM_GET_MCN: + case CDROMRESET: + case CDROMVOLREAD: + case CDROMSEEK: + case CDROMPLAYBLK: + case CDROMCLOSETRAY: + case CDROM_DISC_STATUS: + case CDROM_CHANGER_NSLOTS: + case CDROM_GET_CAPABILITY: + /* Ignore cdrom.h about these next 5 ioctls, they absolutely do + * not take a struct cdrom_read, instead they take a struct cdrom_msf + * which is compatible. + */ + case CDROMREADMODE2: + case CDROMREADMODE1: + case CDROMREADRAW: + case CDROMREADCOOKED: + case CDROMREADALL: + /* DVD ioctls */ + case DVD_READ_STRUCT: + case DVD_WRITE_STRUCT: + case DVD_AUTH: + arg = (unsigned long)compat_ptr(arg); + /* These intepret arg as an unsigned long, not as a pointer, + * so we must not do compat_ptr() conversion. */ + case HDIO_SET_MULTCOUNT: + case HDIO_SET_UNMASKINTR: + case HDIO_SET_KEEPSETTINGS: + case HDIO_SET_32BIT: + case HDIO_SET_NOWERR: + case HDIO_SET_DMA: + case HDIO_SET_PIO_MODE: + case HDIO_SET_NICE: + case HDIO_SET_WCACHE: + case HDIO_SET_ACOUSTIC: + case HDIO_SET_BUSSTATE: + case HDIO_SET_ADDRESS: + case CDROMEJECT_SW: + case CDROM_SET_OPTIONS: + case CDROM_CLEAR_OPTIONS: + case CDROM_SELECT_SPEED: + case CDROM_SELECT_DISC: + case CDROM_MEDIA_CHANGED: + case CDROM_DRIVE_STATUS: + case CDROM_LOCKDOOR: + case CDROM_DEBUG: + break; + default: + /* unknown ioctl number */ + return -ENOIOCTLCMD; + } + + return __blkdev_driver_ioctl(bdev, mode, cmd, arg); +} + +/* Most of the generic ioctls are handled in the normal fallback path. + This assumes the blkdev's low level compat_ioctl always returns + ENOIOCTLCMD for unknown ioctls. */ +long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) +{ + int ret = -ENOIOCTLCMD; + struct inode *inode = file->f_mapping->host; + struct block_device *bdev = inode->i_bdev; + struct gendisk *disk = bdev->bd_disk; + fmode_t mode = file->f_mode; + struct backing_dev_info *bdi; + loff_t size; + unsigned int max_sectors; + + /* + * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have + * to updated it before every ioctl. 
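The split in the switch above, where some cases run arg through compat_ptr() and the HDIO_SET_*/CDROM_* tuning cases deliberately do not, is the heart of the compat layer: a 32-bit caller always passes a 32-bit value, and only arguments that are pointers need widening to a native pointer before the 64-bit handler can dereference them. A reduced model of that conversion (the real compat_ptr() also clears an address-space tag bit on s390):

#include <stdint.h>
#include <stdio.h>

/* Model of compat_ptr(): widen a 32-bit user value to a native pointer. */
static void *compat_ptr_model(uint32_t uptr)
{
	return (void *)(uintptr_t)uptr;
}

int main(void)
{
	uint32_t arg = 0x0804c000;	/* hypothetical 32-bit user address */

	/* CDROMPLAYMSF and friends: arg is a pointer, so widen it ...   */
	void *p = compat_ptr_model(arg);

	/* ... HDIO_SET_MULTCOUNT and friends: arg is a number, keep it. */
	unsigned long count = arg;

	printf("as pointer %p, as number %lu\n", p, count);
	return 0;
}

Treating a numeric argument as a pointer (or vice versa) is exactly the class of bug the explicit case lists above prevent, and it is why unknown ioctls fall through to -ENOIOCTLCMD instead of being passed along blindly.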
+ */ + if (file->f_flags & O_NDELAY) + mode |= FMODE_NDELAY; + else + mode &= ~FMODE_NDELAY; + + switch (cmd) { + case HDIO_GETGEO: + return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); + case BLKPBSZGET: + return compat_put_uint(arg, bdev_physical_block_size(bdev)); + case BLKIOMIN: + return compat_put_uint(arg, bdev_io_min(bdev)); + case BLKIOOPT: + return compat_put_uint(arg, bdev_io_opt(bdev)); + case BLKALIGNOFF: + return compat_put_int(arg, bdev_alignment_offset(bdev)); + case BLKDISCARDZEROES: + return compat_put_uint(arg, bdev_discard_zeroes_data(bdev)); + case BLKFLSBUF: + case BLKROSET: + case BLKDISCARD: + case BLKSECDISCARD: + case BLKZEROOUT: + /* + * the ones below are implemented in blkdev_locked_ioctl, + * but we call blkdev_ioctl, which gets the lock for us + */ + case BLKRRPART: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); + case BLKBSZSET_32: + return blkdev_ioctl(bdev, mode, BLKBSZSET, + (unsigned long)compat_ptr(arg)); + case BLKPG: + return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg)); + case BLKRAGET: + case BLKFRAGET: + if (!arg) + return -EINVAL; + bdi = blk_get_backing_dev_info(bdev); + return compat_put_long(arg, + (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); + case BLKROGET: /* compatible */ + return compat_put_int(arg, bdev_read_only(bdev) != 0); + case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ + return compat_put_int(arg, block_size(bdev)); + case BLKSSZGET: /* get block device hardware sector size */ + return compat_put_int(arg, bdev_logical_block_size(bdev)); + case BLKSECTGET: + max_sectors = min_t(unsigned int, USHRT_MAX, + queue_max_sectors(bdev_get_queue(bdev))); + return compat_put_ushort(arg, max_sectors); + case BLKROTATIONAL: + return compat_put_ushort(arg, + !blk_queue_nonrot(bdev_get_queue(bdev))); + case BLKRASET: /* compatible, but no compat_ptr (!) */ + case BLKFRASET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + bdi = blk_get_backing_dev_info(bdev); + bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; + return 0; + case BLKGETSIZE: + size = i_size_read(bdev->bd_inode); + if ((size >> 9) > ~0UL) + return -EFBIG; + return compat_put_ulong(arg, size >> 9); + + case BLKGETSIZE64_32: + return compat_put_u64(arg, i_size_read(bdev->bd_inode)); + + case BLKTRACESETUP32: + case BLKTRACESTART: /* compatible */ + case BLKTRACESTOP: /* compatible */ + case BLKTRACETEARDOWN: /* compatible */ + ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); + return ret; + default: + if (disk->fops->compat_ioctl) + ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); + if (ret == -ENOIOCTLCMD) + ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg); + return ret; + } +} diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c new file mode 100644 index 000000000..a753df2b3 --- /dev/null +++ b/block/deadline-iosched.c @@ -0,0 +1,476 @@ +/* + * Deadline i/o scheduler. + * + * Copyright (C) 2002 Jens Axboe + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * See Documentation/block/deadline-iosched.txt + */ +static const int read_expire = HZ / 2; /* max time before a read is submitted. */ +static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ +static const int writes_starved = 2; /* max times reads can starve a write */ +static const int fifo_batch = 16; /* # of sequential requests treated as one + by the above parameters. For throughput. 
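The deadline defaults declared above are expressed in jiffies, so read_expire = HZ / 2 is always 500 ms and write_expire = 5 * HZ is always 5 s whatever CONFIG_HZ is; the sysfs handlers later in this file convert back and forth with jiffies_to_msecs()/msecs_to_jiffies(). A quick check of that conversion, assuming HZ = 250 purely for the arithmetic:

#include <stdio.h>

#define HZ 250				/* assumed CONFIG_HZ for this example */

static const int read_expire = HZ / 2;	/* 125 jiffies */
static const int write_expire = 5 * HZ;	/* 1250 jiffies */

/* Simplified jiffies_to_msecs(); exact whenever HZ divides 1000. */
static unsigned int jiffies_to_msecs_model(unsigned long j)
{
	return j * 1000 / HZ;
}

int main(void)
{
	/* The values the read_expire/write_expire iosched attributes
	 * would report with the defaults above. */
	printf("read_expire  = %u ms\n", jiffies_to_msecs_model(read_expire));
	printf("write_expire = %u ms\n", jiffies_to_msecs_model(write_expire));
	return 0;
}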
*/ + +struct deadline_data { + /* + * run time data + */ + + /* + * requests (deadline_rq s) are present on both sort_list and fifo_list + */ + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + + /* + * next in sort order. read, write or both are NULL + */ + struct request *next_rq[2]; + unsigned int batching; /* number of sequential requests made */ + sector_t last_sector; /* head position */ + unsigned int starved; /* times reads have starved writes */ + + /* + * settings that change how the i/o scheduler behaves + */ + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; +}; + +static void deadline_move_request(struct deadline_data *, struct request *); + +static inline struct rb_root * +deadline_rb_root(struct deadline_data *dd, struct request *rq) +{ + return &dd->sort_list[rq_data_dir(rq)]; +} + +/* + * get the request after `rq' in sector-sorted order + */ +static inline struct request * +deadline_latter_request(struct request *rq) +{ + struct rb_node *node = rb_next(&rq->rb_node); + + if (node) + return rb_entry_rq(node); + + return NULL; +} + +static void +deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) +{ + struct rb_root *root = deadline_rb_root(dd, rq); + + elv_rb_add(root, rq); +} + +static inline void +deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + + if (dd->next_rq[data_dir] == rq) + dd->next_rq[data_dir] = deadline_latter_request(rq); + + elv_rb_del(deadline_rb_root(dd, rq), rq); +} + +/* + * add rq to rbtree and fifo + */ +static void +deadline_add_request(struct request_queue *q, struct request *rq) +{ + struct deadline_data *dd = q->elevator->elevator_data; + const int data_dir = rq_data_dir(rq); + + deadline_add_rq_rb(dd, rq); + + /* + * set expire time and add to fifo list + */ + rq->fifo_time = jiffies + dd->fifo_expire[data_dir]; + list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); +} + +/* + * remove rq from rbtree and fifo. 
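Every request that deadline_add_request() above accepts is put on two structures at once: the per-direction red-black tree sorted by sector (used for front merges and for dispatching in roughly sequential order) and the per-direction FIFO stamped with jiffies + fifo_expire (used to bound latency). Because requests enter the FIFO in expiry order, deadline_check_fifo() below only ever needs to look at the list head. A minimal model of that expiry test:

#include <stdio.h>

struct rq_model {
	unsigned long fifo_time;	/* jiffies + fifo_expire[dir] at insert time */
};

/* Mirrors time_after_eq(jiffies, rq->fifo_time) in deadline_check_fifo():
 * a wrap-safe comparison via signed subtraction. */
static int fifo_head_expired(const struct rq_model *head, unsigned long now)
{
	return (long)(now - head->fifo_time) >= 0;
}

int main(void)
{
	struct rq_model head = { .fifo_time = 900 + 125 };	/* read queued at 900, read_expire 125 */

	printf("expired at 1000: %d\n", fifo_head_expired(&head, 1000));	/* 0: 25 jiffies left */
	printf("expired at 1050: %d\n", fifo_head_expired(&head, 1050));	/* 1: deadline passed */
	return 0;
}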
+ */ +static void deadline_remove_request(struct request_queue *q, struct request *rq) +{ + struct deadline_data *dd = q->elevator->elevator_data; + + rq_fifo_clear(rq); + deadline_del_rq_rb(dd, rq); +} + +static int +deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) +{ + struct deadline_data *dd = q->elevator->elevator_data; + struct request *__rq; + int ret; + + /* + * check for front merge + */ + if (dd->front_merges) { + sector_t sector = bio_end_sector(bio); + + __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); + if (__rq) { + BUG_ON(sector != blk_rq_pos(__rq)); + + if (elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_FRONT_MERGE; + goto out; + } + } + } + + return ELEVATOR_NO_MERGE; +out: + *req = __rq; + return ret; +} + +static void deadline_merged_request(struct request_queue *q, + struct request *req, int type) +{ + struct deadline_data *dd = q->elevator->elevator_data; + + /* + * if the merge was a front merge, we need to reposition request + */ + if (type == ELEVATOR_FRONT_MERGE) { + elv_rb_del(deadline_rb_root(dd, req), req); + deadline_add_rq_rb(dd, req); + } +} + +static void +deadline_merged_requests(struct request_queue *q, struct request *req, + struct request *next) +{ + /* + * if next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo + */ + if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { + if (time_before(next->fifo_time, req->fifo_time)) { + list_move(&req->queuelist, &next->queuelist); + req->fifo_time = next->fifo_time; + } + } + + /* + * kill knowledge of next, this one is a goner + */ + deadline_remove_request(q, next); +} + +/* + * move request from sort list to dispatch queue. + */ +static inline void +deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq) +{ + struct request_queue *q = rq->q; + + deadline_remove_request(q, rq); + elv_dispatch_add_tail(q, rq); +} + +/* + * move an entry to dispatch queue + */ +static void +deadline_move_request(struct deadline_data *dd, struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + + dd->next_rq[READ] = NULL; + dd->next_rq[WRITE] = NULL; + dd->next_rq[data_dir] = deadline_latter_request(rq); + + dd->last_sector = rq_end_sector(rq); + + /* + * take it off the sort and fifo list, move + * to dispatch queue + */ + deadline_move_to_dispatch(dd, rq); +} + +/* + * deadline_check_fifo returns 0 if there are no expired requests on the fifo, + * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) + */ +static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) +{ + struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next); + + /* + * rq is expired! + */ + if (time_after_eq(jiffies, rq->fifo_time)) + return 1; + + return 0; +} + +/* + * deadline_dispatch_requests selects the best request according to + * read/write expire, fifo_batch, etc + */ +static int deadline_dispatch_requests(struct request_queue *q, int force) +{ + struct deadline_data *dd = q->elevator->elevator_data; + const int reads = !list_empty(&dd->fifo_list[READ]); + const int writes = !list_empty(&dd->fifo_list[WRITE]); + struct request *rq; + int data_dir; + + /* + * batches are currently reads XOR writes + */ + if (dd->next_rq[WRITE]) + rq = dd->next_rq[WRITE]; + else + rq = dd->next_rq[READ]; + + if (rq && dd->batching < dd->fifo_batch) + /* we have a next request are still entitled to batch */ + goto dispatch_request; + + /* + * at this point we are not running a batch. 
select the appropriate + * data direction (read / write) + */ + + if (reads) { + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); + + if (writes && (dd->starved++ >= dd->writes_starved)) + goto dispatch_writes; + + data_dir = READ; + + goto dispatch_find_request; + } + + /* + * there are either no reads or writes have been starved + */ + + if (writes) { +dispatch_writes: + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); + + dd->starved = 0; + + data_dir = WRITE; + + goto dispatch_find_request; + } + + return 0; + +dispatch_find_request: + /* + * we are not running a batch, find best request for selected data_dir + */ + if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) { + /* + * A deadline has expired, the last request was in the other + * direction, or we have run out of higher-sectored requests. + * Start again from the request with the earliest expiry time. + */ + rq = rq_entry_fifo(dd->fifo_list[data_dir].next); + } else { + /* + * The last req was the same dir and we have a next request in + * sort order. No expired requests so continue on from here. + */ + rq = dd->next_rq[data_dir]; + } + + dd->batching = 0; + +dispatch_request: + /* + * rq is the selected appropriate request. + */ + dd->batching++; + deadline_move_request(dd, rq); + + return 1; +} + +static void deadline_exit_queue(struct elevator_queue *e) +{ + struct deadline_data *dd = e->elevator_data; + + BUG_ON(!list_empty(&dd->fifo_list[READ])); + BUG_ON(!list_empty(&dd->fifo_list[WRITE])); + + kfree(dd); +} + +/* + * initialize elevator private data (deadline_data). + */ +static int deadline_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct deadline_data *dd; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); + if (!dd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = dd; + + INIT_LIST_HEAD(&dd->fifo_list[READ]); + INIT_LIST_HEAD(&dd->fifo_list[WRITE]); + dd->sort_list[READ] = RB_ROOT; + dd->sort_list[WRITE] = RB_ROOT; + dd->fifo_expire[READ] = read_expire; + dd->fifo_expire[WRITE] = write_expire; + dd->writes_starved = writes_starved; + dd->front_merges = 1; + dd->fifo_batch = fifo_batch; + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + return 0; +} + +/* + * sysfs parts below + */ + +static ssize_t +deadline_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +deadline_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct deadline_data *dd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return deadline_var_show(__data, (page)); \ +} +SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1); +SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1); +SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); +SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); +SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct deadline_data *dd = e->elevator_data; \ + int __data; \ + int ret = deadline_var_store(&__data, 
(page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); +STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); +STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); +STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \ + deadline_##name##_store) + +static struct elv_fs_entry deadline_attrs[] = { + DD_ATTR(read_expire), + DD_ATTR(write_expire), + DD_ATTR(writes_starved), + DD_ATTR(front_merges), + DD_ATTR(fifo_batch), + __ATTR_NULL +}; + +static struct elevator_type iosched_deadline = { + .ops = { + .elevator_merge_fn = deadline_merge, + .elevator_merged_fn = deadline_merged_request, + .elevator_merge_req_fn = deadline_merged_requests, + .elevator_dispatch_fn = deadline_dispatch_requests, + .elevator_add_req_fn = deadline_add_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = deadline_init_queue, + .elevator_exit_fn = deadline_exit_queue, + }, + + .elevator_attrs = deadline_attrs, + .elevator_name = "deadline", + .elevator_owner = THIS_MODULE, +}; + +static int __init deadline_init(void) +{ + return elv_register(&iosched_deadline); +} + +static void __exit deadline_exit(void) +{ + elv_unregister(&iosched_deadline); +} + +module_init(deadline_init); +module_exit(deadline_exit); + +MODULE_AUTHOR("Jens Axboe"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("deadline IO scheduler"); diff --git a/block/elevator.c b/block/elevator.c new file mode 100644 index 000000000..8985038f3 --- /dev/null +++ b/block/elevator.c @@ -0,0 +1,1047 @@ +/* + * Block device elevator/IO-scheduler. + * + * Copyright (C) 2000 Andrea Arcangeli SuSE + * + * 30042000 Jens Axboe : + * + * Split the elevator a bit so that it is possible to choose a different + * one or even write a new "plug in". There are three pieces: + * - elevator_fn, inserts a new request in the queue list + * - elevator_merge_fn, decides whether a new buffer can be merged with + * an existing request + * - elevator_dequeue_fn, called when a request is taken off the active list + * + * 20082000 Dave Jones : + * Removed tests for max-bomb-segments, which was breaking elvtune + * when run without -bN + * + * Jens: + * - Rework again to work with bio instead of buffer_heads + * - loose bi_dev comparisons, partition handling is right now + * - completely modularize elevator setup and teardown + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "blk.h" +#include "blk-cgroup.h" + +static DEFINE_SPINLOCK(elv_list_lock); +static LIST_HEAD(elv_list); + +/* + * Merge hash stuff. + */ +#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) + +/* + * Query io scheduler to see if the current process issuing bio may be + * merged with rq. 
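+ * Returns 1 (merge allowed) when the active scheduler does not + * implement the elevator_allow_merge_fn hook.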
+ */ +static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) +{ + struct request_queue *q = rq->q; + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_allow_merge_fn) + return e->type->ops.elevator_allow_merge_fn(q, rq, bio); + + return 1; +} + +/* + * can we safely merge with this request? + */ +bool elv_rq_merge_ok(struct request *rq, struct bio *bio) +{ + if (!blk_rq_merge_ok(rq, bio)) + return 0; + + if (!elv_iosched_allow_merge(rq, bio)) + return 0; + + return 1; +} +EXPORT_SYMBOL(elv_rq_merge_ok); + +static struct elevator_type *elevator_find(const char *name) +{ + struct elevator_type *e; + + list_for_each_entry(e, &elv_list, list) { + if (!strcmp(e->elevator_name, name)) + return e; + } + + return NULL; +} + +static void elevator_put(struct elevator_type *e) +{ + module_put(e->elevator_owner); +} + +static struct elevator_type *elevator_get(const char *name, bool try_loading) +{ + struct elevator_type *e; + + spin_lock(&elv_list_lock); + + e = elevator_find(name); + if (!e && try_loading) { + spin_unlock(&elv_list_lock); + request_module("%s-iosched", name); + spin_lock(&elv_list_lock); + e = elevator_find(name); + } + + if (e && !try_module_get(e->elevator_owner)) + e = NULL; + + spin_unlock(&elv_list_lock); + + return e; +} + +static char chosen_elevator[ELV_NAME_MAX]; + +static int __init elevator_setup(char *str) +{ + /* + * Be backwards-compatible with previous kernels, so users + * won't get the wrong elevator. + */ + strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); + return 1; +} + +__setup("elevator=", elevator_setup); + +/* called during boot to load the elevator chosen by the elevator param */ +void __init load_default_elevator_module(void) +{ + struct elevator_type *e; + + if (!chosen_elevator[0]) + return; + + spin_lock(&elv_list_lock); + e = elevator_find(chosen_elevator); + spin_unlock(&elv_list_lock); + + if (!e) + request_module("%s-iosched", chosen_elevator); +} + +static struct kobj_type elv_ktype; + +struct elevator_queue *elevator_alloc(struct request_queue *q, + struct elevator_type *e) +{ + struct elevator_queue *eq; + + eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); + if (unlikely(!eq)) + return NULL; + + eq->type = e; + kobject_init(&eq->kobj, &elv_ktype); + mutex_init(&eq->sysfs_lock); + hash_init(eq->hash); + + return eq; +} +EXPORT_SYMBOL(elevator_alloc); + +static void elevator_release(struct kobject *kobj) +{ + struct elevator_queue *e; + + e = container_of(kobj, struct elevator_queue, kobj); + elevator_put(e->type); + kfree(e); +} + +int elevator_init(struct request_queue *q, char *name) +{ + struct elevator_type *e = NULL; + int err; + + /* + * q->sysfs_lock must be held to provide mutual exclusion between + * elevator_switch() and here. + */ + lockdep_assert_held(&q->sysfs_lock); + + if (unlikely(q->elevator)) + return 0; + + INIT_LIST_HEAD(&q->queue_head); + q->last_merge = NULL; + q->end_sector = 0; + q->boundary_rq = NULL; + + if (name) { + e = elevator_get(name, true); + if (!e) + return -EINVAL; + } + + /* + * Use the default elevator specified by config boot param or + * config option. Don't try to load modules as we could be running + * off async and request_module() isn't allowed from async. + */ + if (!e && *chosen_elevator) { + e = elevator_get(chosen_elevator, false); + if (!e) + printk(KERN_ERR "I/O scheduler %s not found\n", + chosen_elevator); + } + + if (!e) { + e = elevator_get(CONFIG_DEFAULT_IOSCHED, false); + if (!e) { + printk(KERN_ERR + "Default I/O scheduler not found. 
" \ + "Using noop.\n"); + e = elevator_get("noop", false); + } + } + + err = e->ops.elevator_init_fn(q, e); + if (err) + elevator_put(e); + return err; +} +EXPORT_SYMBOL(elevator_init); + +void elevator_exit(struct elevator_queue *e) +{ + mutex_lock(&e->sysfs_lock); + if (e->type->ops.elevator_exit_fn) + e->type->ops.elevator_exit_fn(e); + mutex_unlock(&e->sysfs_lock); + + kobject_put(&e->kobj); +} +EXPORT_SYMBOL(elevator_exit); + +static inline void __elv_rqhash_del(struct request *rq) +{ + hash_del(&rq->hash); + rq->cmd_flags &= ~REQ_HASHED; +} + +static void elv_rqhash_del(struct request_queue *q, struct request *rq) +{ + if (ELV_ON_HASH(rq)) + __elv_rqhash_del(rq); +} + +static void elv_rqhash_add(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + BUG_ON(ELV_ON_HASH(rq)); + hash_add(e->hash, &rq->hash, rq_hash_key(rq)); + rq->cmd_flags |= REQ_HASHED; +} + +static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) +{ + __elv_rqhash_del(rq); + elv_rqhash_add(q, rq); +} + +static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) +{ + struct elevator_queue *e = q->elevator; + struct hlist_node *next; + struct request *rq; + + hash_for_each_possible_safe(e->hash, rq, next, hash, offset) { + BUG_ON(!ELV_ON_HASH(rq)); + + if (unlikely(!rq_mergeable(rq))) { + __elv_rqhash_del(rq); + continue; + } + + if (rq_hash_key(rq) == offset) + return rq; + } + + return NULL; +} + +/* + * RB-tree support functions for inserting/lookup/removal of requests + * in a sorted RB tree. + */ +void elv_rb_add(struct rb_root *root, struct request *rq) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct request *__rq; + + while (*p) { + parent = *p; + __rq = rb_entry(parent, struct request, rb_node); + + if (blk_rq_pos(rq) < blk_rq_pos(__rq)) + p = &(*p)->rb_left; + else if (blk_rq_pos(rq) >= blk_rq_pos(__rq)) + p = &(*p)->rb_right; + } + + rb_link_node(&rq->rb_node, parent, p); + rb_insert_color(&rq->rb_node, root); +} +EXPORT_SYMBOL(elv_rb_add); + +void elv_rb_del(struct rb_root *root, struct request *rq) +{ + BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); + rb_erase(&rq->rb_node, root); + RB_CLEAR_NODE(&rq->rb_node); +} +EXPORT_SYMBOL(elv_rb_del); + +struct request *elv_rb_find(struct rb_root *root, sector_t sector) +{ + struct rb_node *n = root->rb_node; + struct request *rq; + + while (n) { + rq = rb_entry(n, struct request, rb_node); + + if (sector < blk_rq_pos(rq)) + n = n->rb_left; + else if (sector > blk_rq_pos(rq)) + n = n->rb_right; + else + return rq; + } + + return NULL; +} +EXPORT_SYMBOL(elv_rb_find); + +/* + * Insert rq into dispatch queue of q. Queue lock must be held on + * entry. rq is sort instead into the dispatch queue. To be used by + * specific elevators. 
+ */ +void elv_dispatch_sort(struct request_queue *q, struct request *rq) +{ + sector_t boundary; + struct list_head *entry; + int stop_flags; + + if (q->last_merge == rq) + q->last_merge = NULL; + + elv_rqhash_del(q, rq); + + q->nr_sorted--; + + boundary = q->end_sector; + stop_flags = REQ_SOFTBARRIER | REQ_STARTED; + list_for_each_prev(entry, &q->queue_head) { + struct request *pos = list_entry_rq(entry); + + if ((rq->cmd_flags & REQ_DISCARD) != + (pos->cmd_flags & REQ_DISCARD)) + break; + if (rq_data_dir(rq) != rq_data_dir(pos)) + break; + if (pos->cmd_flags & stop_flags) + break; + if (blk_rq_pos(rq) >= boundary) { + if (blk_rq_pos(pos) < boundary) + continue; + } else { + if (blk_rq_pos(pos) >= boundary) + break; + } + if (blk_rq_pos(rq) >= blk_rq_pos(pos)) + break; + } + + list_add(&rq->queuelist, entry); +} +EXPORT_SYMBOL(elv_dispatch_sort); + +/* + * Insert rq into dispatch queue of q. Queue lock must be held on + * entry. rq is added to the back of the dispatch queue. To be used by + * specific elevators. + */ +void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) +{ + if (q->last_merge == rq) + q->last_merge = NULL; + + elv_rqhash_del(q, rq); + + q->nr_sorted--; + + q->end_sector = rq_end_sector(rq); + q->boundary_rq = rq; + list_add_tail(&rq->queuelist, &q->queue_head); +} +EXPORT_SYMBOL(elv_dispatch_add_tail); + +int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) +{ + struct elevator_queue *e = q->elevator; + struct request *__rq; + int ret; + + /* + * Levels of merges: + * nomerges: No merges at all attempted + * noxmerges: Only simple one-hit cache try + * merges: All merge tries attempted + */ + if (blk_queue_nomerges(q)) + return ELEVATOR_NO_MERGE; + + /* + * First try one-hit cache. + */ + if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) { + ret = blk_try_merge(q->last_merge, bio); + if (ret != ELEVATOR_NO_MERGE) { + *req = q->last_merge; + return ret; + } + } + + if (blk_queue_noxmerges(q)) + return ELEVATOR_NO_MERGE; + + /* + * See if our hash lookup can find a potential backmerge. + */ + __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); + if (__rq && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_BACK_MERGE; + } + + if (e->type->ops.elevator_merge_fn) + return e->type->ops.elevator_merge_fn(q, req, bio); + + return ELEVATOR_NO_MERGE; +} + +/* + * Attempt to do an insertion back merge. Only check for the case where + * we can append 'rq' to an existing request, so we can throw 'rq' away + * afterwards. + * + * Returns true if we merged, false otherwise + */ +static bool elv_attempt_insert_merge(struct request_queue *q, + struct request *rq) +{ + struct request *__rq; + bool ret; + + if (blk_queue_nomerges(q)) + return false; + + /* + * First try one-hit cache. + */ + if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) + return true; + + if (blk_queue_noxmerges(q)) + return false; + + ret = false; + /* + * See if our hash lookup can find a potential backmerge. 
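+ * Each successful merge may expose a further back-merge candidate, + * so the lookup is repeated until it fails.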
+ */ + while (1) { + __rq = elv_rqhash_find(q, blk_rq_pos(rq)); + if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) + break; + + /* The merged request could be merged with others, try again */ + ret = true; + rq = __rq; + } + + return ret; +} + +void elv_merged_request(struct request_queue *q, struct request *rq, int type) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_merged_fn) + e->type->ops.elevator_merged_fn(q, rq, type); + + if (type == ELEVATOR_BACK_MERGE) + elv_rqhash_reposition(q, rq); + + q->last_merge = rq; +} + +void elv_merge_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct elevator_queue *e = q->elevator; + const int next_sorted = next->cmd_flags & REQ_SORTED; + + if (next_sorted && e->type->ops.elevator_merge_req_fn) + e->type->ops.elevator_merge_req_fn(q, rq, next); + + elv_rqhash_reposition(q, rq); + + if (next_sorted) { + elv_rqhash_del(q, next); + q->nr_sorted--; + } + + q->last_merge = rq; +} + +void elv_bio_merged(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_bio_merged_fn) + e->type->ops.elevator_bio_merged_fn(q, rq, bio); +} + +#ifdef CONFIG_PM +static void blk_pm_requeue_request(struct request *rq) +{ + if (rq->q->dev && !(rq->cmd_flags & REQ_PM)) + rq->q->nr_pending--; +} + +static void blk_pm_add_request(struct request_queue *q, struct request *rq) +{ + if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 && + (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING)) + pm_request_resume(q->dev); +} +#else +static inline void blk_pm_requeue_request(struct request *rq) {} +static inline void blk_pm_add_request(struct request_queue *q, + struct request *rq) +{ +} +#endif + +void elv_requeue_request(struct request_queue *q, struct request *rq) +{ + /* + * it already went through dequeue, we need to decrement the + * in_flight count again + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]--; + if (rq->cmd_flags & REQ_SORTED) + elv_deactivate_rq(q, rq); + } + + rq->cmd_flags &= ~REQ_STARTED; + + blk_pm_requeue_request(rq); + + __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); +} + +void elv_drain_elevator(struct request_queue *q) +{ + static int printed; + + lockdep_assert_held(q->queue_lock); + + while (q->elevator->type->ops.elevator_dispatch_fn(q, 1)) + ; + if (q->nr_sorted && printed++ < 10) { + printk(KERN_ERR "%s: forced dispatching is broken " + "(nr_sorted=%u), please report this\n", + q->elevator->type->elevator_name, q->nr_sorted); + } +} + +void __elv_add_request(struct request_queue *q, struct request *rq, int where) +{ + trace_block_rq_insert(q, rq); + + blk_pm_add_request(q, rq); + + rq->q = q; + + if (rq->cmd_flags & REQ_SOFTBARRIER) { + /* barriers are scheduling boundary, update end_sector */ + if (rq->cmd_type == REQ_TYPE_FS) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = rq; + } + } else if (!(rq->cmd_flags & REQ_ELVPRIV) && + (where == ELEVATOR_INSERT_SORT || + where == ELEVATOR_INSERT_SORT_MERGE)) + where = ELEVATOR_INSERT_BACK; + + switch (where) { + case ELEVATOR_INSERT_REQUEUE: + case ELEVATOR_INSERT_FRONT: + rq->cmd_flags |= REQ_SOFTBARRIER; + list_add(&rq->queuelist, &q->queue_head); + break; + + case ELEVATOR_INSERT_BACK: + rq->cmd_flags |= REQ_SOFTBARRIER; + elv_drain_elevator(q); + list_add_tail(&rq->queuelist, &q->queue_head); + /* + * We kick the queue here for the following reasons. 
+ * - The elevator might have returned NULL previously + * to delay requests and returned them now. As the + * queue wasn't empty before this request, ll_rw_blk + * won't run the queue on return, resulting in hang. + * - Usually, back inserted requests won't be merged + * with anything. There's no point in delaying queue + * processing. + */ + __blk_run_queue(q); + break; + + case ELEVATOR_INSERT_SORT_MERGE: + /* + * If we succeed in merging this request with one in the + * queue already, we are done - rq has now been freed, + * so no need to do anything further. + */ + if (elv_attempt_insert_merge(q, rq)) + break; + case ELEVATOR_INSERT_SORT: + BUG_ON(rq->cmd_type != REQ_TYPE_FS); + rq->cmd_flags |= REQ_SORTED; + q->nr_sorted++; + if (rq_mergeable(rq)) { + elv_rqhash_add(q, rq); + if (!q->last_merge) + q->last_merge = rq; + } + + /* + * Some ioscheds (cfq) run q->request_fn directly, so + * rq cannot be accessed after calling + * elevator_add_req_fn. + */ + q->elevator->type->ops.elevator_add_req_fn(q, rq); + break; + + case ELEVATOR_INSERT_FLUSH: + rq->cmd_flags |= REQ_SOFTBARRIER; + blk_insert_flush(rq); + break; + default: + printk(KERN_ERR "%s: bad insertion point %d\n", + __func__, where); + BUG(); + } +} +EXPORT_SYMBOL(__elv_add_request); + +void elv_add_request(struct request_queue *q, struct request *rq, int where) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __elv_add_request(q, rq, where); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(elv_add_request); + +struct request *elv_latter_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_latter_req_fn) + return e->type->ops.elevator_latter_req_fn(q, rq); + return NULL; +} + +struct request *elv_former_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_former_req_fn) + return e->type->ops.elevator_former_req_fn(q, rq); + return NULL; +} + +int elv_set_request(struct request_queue *q, struct request *rq, + struct bio *bio, gfp_t gfp_mask) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_set_req_fn) + return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask); + return 0; +} + +void elv_put_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_put_req_fn) + e->type->ops.elevator_put_req_fn(rq); +} + +int elv_may_queue(struct request_queue *q, int rw) +{ + struct elevator_queue *e = q->elevator; + + if (e->type->ops.elevator_may_queue_fn) + return e->type->ops.elevator_may_queue_fn(q, rw); + + return ELV_MQUEUE_MAY; +} + +void elv_completed_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + /* + * request is released from the driver, io must be done + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]--; + if ((rq->cmd_flags & REQ_SORTED) && + e->type->ops.elevator_completed_req_fn) + e->type->ops.elevator_completed_req_fn(q, rq); + } +} + +#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) + +static ssize_t +elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct elv_fs_entry *entry = to_elv(attr); + struct elevator_queue *e; + ssize_t error; + + if (!entry->show) + return -EIO; + + e = container_of(kobj, struct elevator_queue, kobj); + mutex_lock(&e->sysfs_lock); + error = e->type ? 
entry->show(e, page) : -ENOENT; + mutex_unlock(&e->sysfs_lock); + return error; +} + +static ssize_t +elv_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct elv_fs_entry *entry = to_elv(attr); + struct elevator_queue *e; + ssize_t error; + + if (!entry->store) + return -EIO; + + e = container_of(kobj, struct elevator_queue, kobj); + mutex_lock(&e->sysfs_lock); + error = e->type ? entry->store(e, page, length) : -ENOENT; + mutex_unlock(&e->sysfs_lock); + return error; +} + +static const struct sysfs_ops elv_sysfs_ops = { + .show = elv_attr_show, + .store = elv_attr_store, +}; + +static struct kobj_type elv_ktype = { + .sysfs_ops = &elv_sysfs_ops, + .release = elevator_release, +}; + +int elv_register_queue(struct request_queue *q) +{ + struct elevator_queue *e = q->elevator; + int error; + + error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); + if (!error) { + struct elv_fs_entry *attr = e->type->elevator_attrs; + if (attr) { + while (attr->attr.name) { + if (sysfs_create_file(&e->kobj, &attr->attr)) + break; + attr++; + } + } + kobject_uevent(&e->kobj, KOBJ_ADD); + e->registered = 1; + } + return error; +} +EXPORT_SYMBOL(elv_register_queue); + +void elv_unregister_queue(struct request_queue *q) +{ + if (q) { + struct elevator_queue *e = q->elevator; + + kobject_uevent(&e->kobj, KOBJ_REMOVE); + kobject_del(&e->kobj); + e->registered = 0; + } +} +EXPORT_SYMBOL(elv_unregister_queue); + +int elv_register(struct elevator_type *e) +{ + char *def = ""; + + /* create icq_cache if requested */ + if (e->icq_size) { + if (WARN_ON(e->icq_size < sizeof(struct io_cq)) || + WARN_ON(e->icq_align < __alignof__(struct io_cq))) + return -EINVAL; + + snprintf(e->icq_cache_name, sizeof(e->icq_cache_name), + "%s_io_cq", e->elevator_name); + e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size, + e->icq_align, 0, NULL); + if (!e->icq_cache) + return -ENOMEM; + } + + /* register, don't allow duplicate names */ + spin_lock(&elv_list_lock); + if (elevator_find(e->elevator_name)) { + spin_unlock(&elv_list_lock); + if (e->icq_cache) + kmem_cache_destroy(e->icq_cache); + return -EBUSY; + } + list_add_tail(&e->list, &elv_list); + spin_unlock(&elv_list_lock); + + /* print pretty message */ + if (!strcmp(e->elevator_name, chosen_elevator) || + (!*chosen_elevator && + !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) + def = " (default)"; + + printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, + def); + return 0; +} +EXPORT_SYMBOL_GPL(elv_register); + +void elv_unregister(struct elevator_type *e) +{ + /* unregister */ + spin_lock(&elv_list_lock); + list_del_init(&e->list); + spin_unlock(&elv_list_lock); + + /* + * Destroy icq_cache if it exists. icq's are RCU managed. Make + * sure all RCU operations are complete before proceeding. + */ + if (e->icq_cache) { + rcu_barrier(); + kmem_cache_destroy(e->icq_cache); + e->icq_cache = NULL; + } +} +EXPORT_SYMBOL_GPL(elv_unregister); + +/* + * switch to new_e io scheduler. be careful not to introduce deadlocks - + * we don't free the old io scheduler, before we have allocated what we + * need for the new one. this way we have a chance of going back to the old + * one, if the new one fails init for some reason. + */ +static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) +{ + struct elevator_queue *old = q->elevator; + bool registered = old->registered; + int err; + + /* + * Turn on BYPASS and drain all requests w/ elevator private data. 
+ * Block layer doesn't call into a quiesced elevator - all requests + * are directly put on the dispatch list without elevator data + * using INSERT_BACK. All requests have SOFTBARRIER set and no + * merge happens either. + */ + blk_queue_bypass_start(q); + + /* unregister and clear all auxiliary data of the old elevator */ + if (registered) + elv_unregister_queue(q); + + spin_lock_irq(q->queue_lock); + ioc_clear_queue(q); + spin_unlock_irq(q->queue_lock); + + /* allocate, init and register new elevator */ + err = new_e->ops.elevator_init_fn(q, new_e); + if (err) + goto fail_init; + + if (registered) { + err = elv_register_queue(q); + if (err) + goto fail_register; + } + + /* done, kill the old one and finish */ + elevator_exit(old); + blk_queue_bypass_end(q); + + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); + + return 0; + +fail_register: + elevator_exit(q->elevator); +fail_init: + /* switch failed, restore and re-register old elevator */ + q->elevator = old; + elv_register_queue(q); + blk_queue_bypass_end(q); + + return err; +} + +/* + * Switch this queue to the given IO scheduler. + */ +static int __elevator_change(struct request_queue *q, const char *name) +{ + char elevator_name[ELV_NAME_MAX]; + struct elevator_type *e; + + if (!q->elevator) + return -ENXIO; + + strlcpy(elevator_name, name, sizeof(elevator_name)); + e = elevator_get(strstrip(elevator_name), true); + if (!e) { + printk(KERN_ERR "elevator: type %s not found\n", elevator_name); + return -EINVAL; + } + + if (!strcmp(elevator_name, q->elevator->type->elevator_name)) { + elevator_put(e); + return 0; + } + + return elevator_switch(q, e); +} + +int elevator_change(struct request_queue *q, const char *name) +{ + int ret; + + /* Protect q->elevator from elevator_init() */ + mutex_lock(&q->sysfs_lock); + ret = __elevator_change(q, name); + mutex_unlock(&q->sysfs_lock); + + return ret; +} +EXPORT_SYMBOL(elevator_change); + +ssize_t elv_iosched_store(struct request_queue *q, const char *name, + size_t count) +{ + int ret; + + if (!q->elevator) + return count; + + ret = __elevator_change(q, name); + if (!ret) + return count; + + printk(KERN_ERR "elevator: switch to %s failed\n", name); + return ret; +} + +ssize_t elv_iosched_show(struct request_queue *q, char *name) +{ + struct elevator_queue *e = q->elevator; + struct elevator_type *elv; + struct elevator_type *__e; + int len = 0; + + if (!q->elevator || !blk_queue_stackable(q)) + return sprintf(name, "none\n"); + + elv = e->type; + + spin_lock(&elv_list_lock); + list_for_each_entry(__e, &elv_list, list) { + if (!strcmp(elv->elevator_name, __e->elevator_name)) + len += sprintf(name+len, "[%s] ", elv->elevator_name); + else + len += sprintf(name+len, "%s ", __e->elevator_name); + } + spin_unlock(&elv_list_lock); + + len += sprintf(len+name, "\n"); + return len; +} + +struct request *elv_rb_former_request(struct request_queue *q, + struct request *rq) +{ + struct rb_node *rbprev = rb_prev(&rq->rb_node); + + if (rbprev) + return rb_entry_rq(rbprev); + + return NULL; +} +EXPORT_SYMBOL(elv_rb_former_request); + +struct request *elv_rb_latter_request(struct request_queue *q, + struct request *rq) +{ + struct rb_node *rbnext = rb_next(&rq->rb_node); + + if (rbnext) + return rb_entry_rq(rbnext); + + return NULL; +} +EXPORT_SYMBOL(elv_rb_latter_request); diff --git a/block/genhd.c b/block/genhd.c new file mode 100644 index 000000000..d2b7ebfb5 --- /dev/null +++ b/block/genhd.c @@ -0,0 +1,1926 @@ +/* + * gendisk handling + */ + +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "blk.h" + +static DEFINE_MUTEX(block_class_lock); +struct kobject *block_depr; + +/* for extended dynamic devt allocation, currently only one major is used */ +#define NR_EXT_DEVT (1 << MINORBITS) + +/* For extended devt allocation. ext_devt_lock prevents look up + * results from going away underneath its user. + */ +static DEFINE_SPINLOCK(ext_devt_lock); +static DEFINE_IDR(ext_devt_idr); + +static struct device_type disk_type; + +static void disk_check_events(struct disk_events *ev, + unsigned int *clearing_ptr); +static void disk_alloc_events(struct gendisk *disk); +static void disk_add_events(struct gendisk *disk); +static void disk_del_events(struct gendisk *disk); +static void disk_release_events(struct gendisk *disk); + +/** + * disk_get_part - get partition + * @disk: disk to look partition from + * @partno: partition number + * + * Look for partition @partno from @disk. If found, increment + * reference count and return it. + * + * CONTEXT: + * Don't care. + * + * RETURNS: + * Pointer to the found partition on success, NULL if not found. + */ +struct hd_struct *disk_get_part(struct gendisk *disk, int partno) +{ + struct hd_struct *part = NULL; + struct disk_part_tbl *ptbl; + + if (unlikely(partno < 0)) + return NULL; + + rcu_read_lock(); + + ptbl = rcu_dereference(disk->part_tbl); + if (likely(partno < ptbl->len)) { + part = rcu_dereference(ptbl->part[partno]); + if (part) + get_device(part_to_dev(part)); + } + + rcu_read_unlock(); + + return part; +} +EXPORT_SYMBOL_GPL(disk_get_part); + +/** + * disk_part_iter_init - initialize partition iterator + * @piter: iterator to initialize + * @disk: disk to iterate over + * @flags: DISK_PITER_* flags + * + * Initialize @piter so that it iterates over partitions of @disk. + * + * CONTEXT: + * Don't care. + */ +void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, + unsigned int flags) +{ + struct disk_part_tbl *ptbl; + + rcu_read_lock(); + ptbl = rcu_dereference(disk->part_tbl); + + piter->disk = disk; + piter->part = NULL; + + if (flags & DISK_PITER_REVERSE) + piter->idx = ptbl->len - 1; + else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0)) + piter->idx = 0; + else + piter->idx = 1; + + piter->flags = flags; + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(disk_part_iter_init); + +/** + * disk_part_iter_next - proceed iterator to the next partition and return it + * @piter: iterator of interest + * + * Proceed @piter to the next partition and return it. + * + * CONTEXT: + * Don't care. 
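+ * + * RETURNS: + * Pointer to the next partition with a reference taken, or NULL when + * the iteration is complete.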
+ */ +struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) +{ + struct disk_part_tbl *ptbl; + int inc, end; + + /* put the last partition */ + disk_put_part(piter->part); + piter->part = NULL; + + /* get part_tbl */ + rcu_read_lock(); + ptbl = rcu_dereference(piter->disk->part_tbl); + + /* determine iteration parameters */ + if (piter->flags & DISK_PITER_REVERSE) { + inc = -1; + if (piter->flags & (DISK_PITER_INCL_PART0 | + DISK_PITER_INCL_EMPTY_PART0)) + end = -1; + else + end = 0; + } else { + inc = 1; + end = ptbl->len; + } + + /* iterate to the next partition */ + for (; piter->idx != end; piter->idx += inc) { + struct hd_struct *part; + + part = rcu_dereference(ptbl->part[piter->idx]); + if (!part) + continue; + if (!part_nr_sects_read(part) && + !(piter->flags & DISK_PITER_INCL_EMPTY) && + !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && + piter->idx == 0)) + continue; + + get_device(part_to_dev(part)); + piter->part = part; + piter->idx += inc; + break; + } + + rcu_read_unlock(); + + return piter->part; +} +EXPORT_SYMBOL_GPL(disk_part_iter_next); + +/** + * disk_part_iter_exit - finish up partition iteration + * @piter: iter of interest + * + * Called when iteration is over. Cleans up @piter. + * + * CONTEXT: + * Don't care. + */ +void disk_part_iter_exit(struct disk_part_iter *piter) +{ + disk_put_part(piter->part); + piter->part = NULL; +} +EXPORT_SYMBOL_GPL(disk_part_iter_exit); + +static inline int sector_in_part(struct hd_struct *part, sector_t sector) +{ + return part->start_sect <= sector && + sector < part->start_sect + part_nr_sects_read(part); +} + +/** + * disk_map_sector_rcu - map sector to partition + * @disk: gendisk of interest + * @sector: sector to map + * + * Find out which partition @sector maps to on @disk. This is + * primarily used for stats accounting. + * + * CONTEXT: + * RCU read locked. The returned partition pointer is valid only + * while preemption is disabled. + * + * RETURNS: + * Found partition on success, part0 is returned if no partition matches + */ +struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) +{ + struct disk_part_tbl *ptbl; + struct hd_struct *part; + int i; + + ptbl = rcu_dereference(disk->part_tbl); + + part = rcu_dereference(ptbl->last_lookup); + if (part && sector_in_part(part, sector)) + return part; + + for (i = 1; i < ptbl->len; i++) { + part = rcu_dereference(ptbl->part[i]); + + if (part && sector_in_part(part, sector)) { + rcu_assign_pointer(ptbl->last_lookup, part); + return part; + } + } + return &disk->part0; +} +EXPORT_SYMBOL_GPL(disk_map_sector_rcu); + +/* + * Can be deleted altogether. Later. + * + */ +static struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; +} *major_names[BLKDEV_MAJOR_HASH_SIZE]; + +/* index in the above - for now: assume no multimajor ranges */ +static inline int major_to_index(unsigned major) +{ + return major % BLKDEV_MAJOR_HASH_SIZE; +} + +#ifdef CONFIG_PROC_FS +void blkdev_show(struct seq_file *seqf, off_t offset) +{ + struct blk_major_name *dp; + + if (offset < BLKDEV_MAJOR_HASH_SIZE) { + mutex_lock(&block_class_lock); + for (dp = major_names[offset]; dp; dp = dp->next) + seq_printf(seqf, "%3d %s\n", dp->major, dp->name); + mutex_unlock(&block_class_lock); + } +} +#endif /* CONFIG_PROC_FS */ + +/** + * register_blkdev - register a new block device + * + * @major: the requested major device number [1..255]. If @major=0, try to + * allocate any unused major number. 
+ * @name: the name of the new block device as a zero terminated string + * + * The @name must be unique within the system. + * + * The return value depends on the @major input parameter. + * - if a major device number was requested in range [1..255] then the + * function returns zero on success, or a negative error code + * - if any unused major number was requested with @major=0 parameter + * then the return value is the allocated major number in range + * [1..255] or a negative error code otherwise + */ +int register_blkdev(unsigned int major, const char *name) +{ + struct blk_major_name **n, *p; + int index, ret = 0; + + mutex_lock(&block_class_lock); + + /* temporary */ + if (major == 0) { + for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) { + if (major_names[index] == NULL) + break; + } + + if (index == 0) { + printk("register_blkdev: failed to get major for %s\n", + name); + ret = -EBUSY; + goto out; + } + major = index; + ret = major; + } + + p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL); + if (p == NULL) { + ret = -ENOMEM; + goto out; + } + + p->major = major; + strlcpy(p->name, name, sizeof(p->name)); + p->next = NULL; + index = major_to_index(major); + + for (n = &major_names[index]; *n; n = &(*n)->next) { + if ((*n)->major == major) + break; + } + if (!*n) + *n = p; + else + ret = -EBUSY; + + if (ret < 0) { + printk("register_blkdev: cannot get major %d for %s\n", + major, name); + kfree(p); + } +out: + mutex_unlock(&block_class_lock); + return ret; +} + +EXPORT_SYMBOL(register_blkdev); + +void unregister_blkdev(unsigned int major, const char *name) +{ + struct blk_major_name **n; + struct blk_major_name *p = NULL; + int index = major_to_index(major); + + mutex_lock(&block_class_lock); + for (n = &major_names[index]; *n; n = &(*n)->next) + if ((*n)->major == major) + break; + if (!*n || strcmp((*n)->name, name)) { + WARN_ON(1); + } else { + p = *n; + *n = p->next; + } + mutex_unlock(&block_class_lock); + kfree(p); +} + +EXPORT_SYMBOL(unregister_blkdev); + +static struct kobj_map *bdev_map; + +/** + * blk_mangle_minor - scatter minor numbers apart + * @minor: minor number to mangle + * + * Scatter consecutively allocated @minor number apart if MANGLE_DEVT + * is enabled. Mangling twice gives the original value. + * + * RETURNS: + * Mangled value. + * + * CONTEXT: + * Don't care. + */ +static int blk_mangle_minor(int minor) +{ +#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT + int i; + + for (i = 0; i < MINORBITS / 2; i++) { + int low = minor & (1 << i); + int high = minor & (1 << (MINORBITS - 1 - i)); + int distance = MINORBITS - 1 - 2 * i; + + minor ^= low | high; /* clear both bits */ + low <<= distance; /* swap the positions */ + high >>= distance; + minor |= low | high; /* and set */ + } +#endif + return minor; +} + +/** + * blk_alloc_devt - allocate a dev_t for a partition + * @part: partition to allocate dev_t for + * @devt: out parameter for resulting dev_t + * + * Allocate a dev_t for block device. + * + * RETURNS: + * 0 on success, allocated dev_t is returned in *@devt. -errno on + * failure. + * + * CONTEXT: + * Might sleep. + */ +int blk_alloc_devt(struct hd_struct *part, dev_t *devt) +{ + struct gendisk *disk = part_to_disk(part); + int idx; + + /* in consecutive minor range? 
*/ + if (part->partno < disk->minors) { + *devt = MKDEV(disk->major, disk->first_minor + part->partno); + return 0; + } + + /* allocate ext devt */ + idr_preload(GFP_KERNEL); + + spin_lock_bh(&ext_devt_lock); + idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); + spin_unlock_bh(&ext_devt_lock); + + idr_preload_end(); + if (idx < 0) + return idx == -ENOSPC ? -EBUSY : idx; + + *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx)); + return 0; +} + +/** + * blk_free_devt - free a dev_t + * @devt: dev_t to free + * + * Free @devt which was allocated using blk_alloc_devt(). + * + * CONTEXT: + * Might sleep. + */ +void blk_free_devt(dev_t devt) +{ + if (devt == MKDEV(0, 0)) + return; + + if (MAJOR(devt) == BLOCK_EXT_MAJOR) { + spin_lock_bh(&ext_devt_lock); + idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); + spin_unlock_bh(&ext_devt_lock); + } +} + +static char *bdevt_str(dev_t devt, char *buf) +{ + if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) { + char tbuf[BDEVT_SIZE]; + snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt)); + snprintf(buf, BDEVT_SIZE, "%-9s", tbuf); + } else + snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt)); + + return buf; +} + +/* + * Register device numbers dev..(dev+range-1) + * range must be nonzero + * The hash chain is sorted on range, so that subranges can override. + */ +void blk_register_region(dev_t devt, unsigned long range, struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), void *data) +{ + kobj_map(bdev_map, devt, range, module, probe, lock, data); +} + +EXPORT_SYMBOL(blk_register_region); + +void blk_unregister_region(dev_t devt, unsigned long range) +{ + kobj_unmap(bdev_map, devt, range); +} + +EXPORT_SYMBOL(blk_unregister_region); + +static struct kobject *exact_match(dev_t devt, int *partno, void *data) +{ + struct gendisk *p = data; + + return &disk_to_dev(p)->kobj; +} + +static int exact_lock(dev_t devt, void *data) +{ + struct gendisk *p = data; + + if (!get_disk(p)) + return -1; + return 0; +} + +static void register_disk(struct gendisk *disk) +{ + struct device *ddev = disk_to_dev(disk); + struct block_device *bdev; + struct disk_part_iter piter; + struct hd_struct *part; + int err; + + ddev->parent = disk->driverfs_dev; + + dev_set_name(ddev, "%s", disk->disk_name); + + /* delay uevents, until we scanned partition table */ + dev_set_uevent_suppress(ddev, 1); + + if (device_add(ddev)) + return; + if (!sysfs_deprecated) { + err = sysfs_create_link(block_depr, &ddev->kobj, + kobject_name(&ddev->kobj)); + if (err) { + device_del(ddev); + return; + } + } + + /* + * avoid probable deadlock caused by allocating memory with + * GFP_KERNEL in runtime_resume callback of its all ancestor + * devices + */ + pm_runtime_set_memalloc_noio(ddev, true); + + disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); + disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); + + /* No minors to use for partitions */ + if (!disk_part_scan_enabled(disk)) + goto exit; + + /* No such device (e.g., media were just removed) */ + if (!get_capacity(disk)) + goto exit; + + bdev = bdget_disk(disk, 0); + if (!bdev) + goto exit; + + bdev->bd_invalidated = 1; + err = blkdev_get(bdev, FMODE_READ, NULL); + if (err < 0) + goto exit; + blkdev_put(bdev, FMODE_READ); + +exit: + /* announce disk after possible partitions are created */ + dev_set_uevent_suppress(ddev, 0); + kobject_uevent(&ddev->kobj, KOBJ_ADD); + + /* announce possible partitions */ + 
disk_part_iter_init(&piter, disk, 0); + while ((part = disk_part_iter_next(&piter))) + kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); + disk_part_iter_exit(&piter); +} + +/** + * add_disk - add partitioning information to kernel list + * @disk: per-device partitioning information + * + * This function registers the partitioning information in @disk + * with the kernel. + * + * FIXME: error handling + */ +void add_disk(struct gendisk *disk) +{ + struct backing_dev_info *bdi; + dev_t devt; + int retval; + + /* minors == 0 indicates to use ext devt from part0 and should + * be accompanied with EXT_DEVT flag. Make sure all + * parameters make sense. + */ + WARN_ON(disk->minors && !(disk->major || disk->first_minor)); + WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT)); + + disk->flags |= GENHD_FL_UP; + + retval = blk_alloc_devt(&disk->part0, &devt); + if (retval) { + WARN_ON(1); + return; + } + disk_to_dev(disk)->devt = devt; + + /* ->major and ->first_minor aren't supposed to be + * dereferenced from here on, but set them just in case. + */ + disk->major = MAJOR(devt); + disk->first_minor = MINOR(devt); + + disk_alloc_events(disk); + + /* Register BDI before referencing it from bdev */ + bdi = &disk->queue->backing_dev_info; + bdi_register_dev(bdi, disk_devt(disk)); + + blk_register_region(disk_devt(disk), disk->minors, NULL, + exact_match, exact_lock, disk); + register_disk(disk); + blk_register_queue(disk); + + /* + * Take an extra ref on queue which will be put on disk_release() + * so that it sticks around as long as @disk is there. + */ + WARN_ON_ONCE(!blk_get_queue(disk->queue)); + + retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, + "bdi"); + WARN_ON(retval); + + disk_add_events(disk); +} +EXPORT_SYMBOL(add_disk); + +void del_gendisk(struct gendisk *disk) +{ + struct disk_part_iter piter; + struct hd_struct *part; + + disk_del_events(disk); + + /* invalidate stuff */ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); + while ((part = disk_part_iter_next(&piter))) { + invalidate_partition(disk, part->partno); + delete_partition(disk, part->partno); + } + disk_part_iter_exit(&piter); + + invalidate_partition(disk, 0); + set_capacity(disk, 0); + disk->flags &= ~GENHD_FL_UP; + + sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); + blk_unregister_queue(disk); + blk_unregister_region(disk_devt(disk), disk->minors); + + part_stat_set_all(&disk->part0, 0); + disk->part0.stamp = 0; + + kobject_put(disk->part0.holder_dir); + kobject_put(disk->slave_dir); + disk->driverfs_dev = NULL; + if (!sysfs_deprecated) + sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); + pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); + device_del(disk_to_dev(disk)); +} +EXPORT_SYMBOL(del_gendisk); + +/** + * get_gendisk - get partitioning information for a given device + * @devt: device to get partitioning information for + * @partno: returned partition index + * + * This function gets the structure containing partitioning + * information for the given device @devt. 
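+ * + * RETURNS: + * The gendisk @devt belongs to, with a reference taken, or NULL if no + * matching disk is registered. On success *@partno is set to the + * partition number within that disk.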
+ */ +struct gendisk *get_gendisk(dev_t devt, int *partno) +{ + struct gendisk *disk = NULL; + + if (MAJOR(devt) != BLOCK_EXT_MAJOR) { + struct kobject *kobj; + + kobj = kobj_lookup(bdev_map, devt, partno); + if (kobj) + disk = dev_to_disk(kobj_to_dev(kobj)); + } else { + struct hd_struct *part; + + spin_lock_bh(&ext_devt_lock); + part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); + if (part && get_disk(part_to_disk(part))) { + *partno = part->partno; + disk = part_to_disk(part); + } + spin_unlock_bh(&ext_devt_lock); + } + + return disk; +} +EXPORT_SYMBOL(get_gendisk); + +/** + * bdget_disk - do bdget() by gendisk and partition number + * @disk: gendisk of interest + * @partno: partition number + * + * Find partition @partno from @disk, do bdget() on it. + * + * CONTEXT: + * Don't care. + * + * RETURNS: + * Resulting block_device on success, NULL on failure. + */ +struct block_device *bdget_disk(struct gendisk *disk, int partno) +{ + struct hd_struct *part; + struct block_device *bdev = NULL; + + part = disk_get_part(disk, partno); + if (part) + bdev = bdget(part_devt(part)); + disk_put_part(part); + + return bdev; +} +EXPORT_SYMBOL(bdget_disk); + +/* + * print a full list of all partitions - intended for places where the root + * filesystem can't be mounted and thus to give the victim some idea of what + * went wrong + */ +void __init printk_all_partitions(void) +{ + struct class_dev_iter iter; + struct device *dev; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while ((dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct disk_part_iter piter; + struct hd_struct *part; + char name_buf[BDEVNAME_SIZE]; + char devt_buf[BDEVT_SIZE]; + + /* + * Don't show empty devices or things that have been + * suppressed + */ + if (get_capacity(disk) == 0 || + (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)) + continue; + + /* + * Note, unlike /proc/partitions, I am showing the + * numbers in hex - the same format as the root= + * option takes. + */ + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); + while ((part = disk_part_iter_next(&piter))) { + bool is_part0 = part == &disk->part0; + + printk("%s%s %10llu %s %s", is_part0 ? "" : " ", + bdevt_str(part_devt(part), devt_buf), + (unsigned long long)part_nr_sects_read(part) >> 1 + , disk_name(disk, part->partno, name_buf), + part->info ? 
part->info->uuid : ""); + if (is_part0) { + if (disk->driverfs_dev != NULL && + disk->driverfs_dev->driver != NULL) + printk(" driver: %s\n", + disk->driverfs_dev->driver->name); + else + printk(" (driver?)\n"); + } else + printk("\n"); + } + disk_part_iter_exit(&piter); + } + class_dev_iter_exit(&iter); +} + +#ifdef CONFIG_PROC_FS +/* iterator */ +static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos) +{ + loff_t skip = *pos; + struct class_dev_iter *iter; + struct device *dev; + + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return ERR_PTR(-ENOMEM); + + seqf->private = iter; + class_dev_iter_init(iter, &block_class, NULL, &disk_type); + do { + dev = class_dev_iter_next(iter); + if (!dev) + return NULL; + } while (skip--); + + return dev_to_disk(dev); +} + +static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos) +{ + struct device *dev; + + (*pos)++; + dev = class_dev_iter_next(seqf->private); + if (dev) + return dev_to_disk(dev); + + return NULL; +} + +static void disk_seqf_stop(struct seq_file *seqf, void *v) +{ + struct class_dev_iter *iter = seqf->private; + + /* stop is called even after start failed :-( */ + if (iter) { + class_dev_iter_exit(iter); + kfree(iter); + } +} + +static void *show_partition_start(struct seq_file *seqf, loff_t *pos) +{ + void *p; + + p = disk_seqf_start(seqf, pos); + if (!IS_ERR_OR_NULL(p) && !*pos) + seq_puts(seqf, "major minor #blocks name\n\n"); + return p; +} + +static int show_partition(struct seq_file *seqf, void *v) +{ + struct gendisk *sgp = v; + struct disk_part_iter piter; + struct hd_struct *part; + char buf[BDEVNAME_SIZE]; + + /* Don't show non-partitionable removeable devices or empty devices */ + if (!get_capacity(sgp) || (!disk_max_parts(sgp) && + (sgp->flags & GENHD_FL_REMOVABLE))) + return 0; + if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO) + return 0; + + /* show the full disk and all non-0 size partitions of it */ + disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0); + while ((part = disk_part_iter_next(&piter))) + seq_printf(seqf, "%4d %7d %10llu %s\n", + MAJOR(part_devt(part)), MINOR(part_devt(part)), + (unsigned long long)part_nr_sects_read(part) >> 1, + disk_name(sgp, part->partno, buf)); + disk_part_iter_exit(&piter); + + return 0; +} + +static const struct seq_operations partitions_op = { + .start = show_partition_start, + .next = disk_seqf_next, + .stop = disk_seqf_stop, + .show = show_partition +}; + +static int partitions_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &partitions_op); +} + +static const struct file_operations proc_partitions_operations = { + .open = partitions_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +#endif + + +static struct kobject *base_probe(dev_t devt, int *partno, void *data) +{ + if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) + /* Make old-style 2.4 aliases work */ + request_module("block-major-%d", MAJOR(devt)); + return NULL; +} + +static int __init genhd_device_init(void) +{ + int error; + + block_class.dev_kobj = sysfs_dev_block_kobj; + error = class_register(&block_class); + if (unlikely(error)) + return error; + bdev_map = kobj_map_init(base_probe, &block_class_lock); + blk_dev_init(); + + register_blkdev(BLOCK_EXT_MAJOR, "blkext"); + + /* create top-level block dir */ + if (!sysfs_deprecated) + block_depr = kobject_create_and_add("block", NULL); + return 0; +} + +subsys_initcall(genhd_device_init); + +static ssize_t disk_range_show(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", disk->minors); +} + +static ssize_t disk_ext_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", disk_max_parts(disk)); +} + +static ssize_t disk_removable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", + (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0)); +} + +static ssize_t disk_ro_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0); +} + +static ssize_t disk_capability_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%x\n", disk->flags); +} + +static ssize_t disk_alignment_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); +} + +static ssize_t disk_discard_alignment_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); +} + +static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); +static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); +static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); +static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); +static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); +static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show, + NULL); +static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); +static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); +static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); +#ifdef CONFIG_FAIL_MAKE_REQUEST +static struct device_attribute dev_attr_fail = + __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); +#endif +#ifdef CONFIG_FAIL_IO_TIMEOUT +static struct device_attribute dev_attr_fail_timeout = + __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show, + part_timeout_store); +#endif + +static struct attribute *disk_attrs[] = { + &dev_attr_range.attr, + &dev_attr_ext_range.attr, + &dev_attr_removable.attr, + &dev_attr_ro.attr, + &dev_attr_size.attr, + &dev_attr_alignment_offset.attr, + &dev_attr_discard_alignment.attr, + &dev_attr_capability.attr, + &dev_attr_stat.attr, + &dev_attr_inflight.attr, +#ifdef CONFIG_FAIL_MAKE_REQUEST + &dev_attr_fail.attr, +#endif +#ifdef CONFIG_FAIL_IO_TIMEOUT + &dev_attr_fail_timeout.attr, +#endif + NULL +}; + +static struct attribute_group disk_attr_group = { + .attrs = disk_attrs, +}; + +static const struct attribute_group *disk_attr_groups[] = { + &disk_attr_group, + NULL +}; + +/** + * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way + * @disk: disk to replace part_tbl for + * @new_ptbl: new part_tbl to install + * + * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The + * original ptbl is freed using RCU callback. + * + * LOCKING: + * Matching bd_mutx locked. 
+ */ +static void disk_replace_part_tbl(struct gendisk *disk, + struct disk_part_tbl *new_ptbl) +{ + struct disk_part_tbl *old_ptbl = disk->part_tbl; + + rcu_assign_pointer(disk->part_tbl, new_ptbl); + + if (old_ptbl) { + rcu_assign_pointer(old_ptbl->last_lookup, NULL); + kfree_rcu(old_ptbl, rcu_head); + } +} + +/** + * disk_expand_part_tbl - expand disk->part_tbl + * @disk: disk to expand part_tbl for + * @partno: expand such that this partno can fit in + * + * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl + * uses RCU to allow unlocked dereferencing for stats and other stuff. + * + * LOCKING: + * Matching bd_mutex locked, might sleep. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int disk_expand_part_tbl(struct gendisk *disk, int partno) +{ + struct disk_part_tbl *old_ptbl = disk->part_tbl; + struct disk_part_tbl *new_ptbl; + int len = old_ptbl ? old_ptbl->len : 0; + int i, target; + size_t size; + + /* + * check for int overflow, since we can get here from blkpg_ioctl() + * with a user passed 'partno'. + */ + target = partno + 1; + if (target < 0) + return -EINVAL; + + /* disk_max_parts() is zero during initialization, ignore if so */ + if (disk_max_parts(disk) && target > disk_max_parts(disk)) + return -EINVAL; + + if (target <= len) + return 0; + + size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]); + new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id); + if (!new_ptbl) + return -ENOMEM; + + new_ptbl->len = target; + + for (i = 0; i < len; i++) + rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]); + + disk_replace_part_tbl(disk, new_ptbl); + return 0; +} + +static void disk_release(struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + + blk_free_devt(dev->devt); + disk_release_events(disk); + kfree(disk->random); + disk_replace_part_tbl(disk, NULL); + free_part_stats(&disk->part0); + free_part_info(&disk->part0); + if (disk->queue) + blk_put_queue(disk->queue); + kfree(disk); +} +struct class block_class = { + .name = "block", +}; + +static char *block_devnode(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid) +{ + struct gendisk *disk = dev_to_disk(dev); + + if (disk->devnode) + return disk->devnode(disk, mode); + return NULL; +} + +static struct device_type disk_type = { + .name = "disk", + .groups = disk_attr_groups, + .release = disk_release, + .devnode = block_devnode, +}; + +#ifdef CONFIG_PROC_FS +/* + * aggregate disk stat collector. Uses the same stats that the sysfs + * entries do, above, but makes them available through one seq_file. + * + * The output looks suspiciously like /proc/partitions with a bunch of + * extra fields. 
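+ * + * Each line shows: major, minor, device name, reads completed, reads + * merged, sectors read, milliseconds spent reading, the same four + * counters for writes, I/Os currently in flight, milliseconds spent + * doing I/O and the weighted milliseconds spent doing I/O.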
+ */ +static int diskstats_show(struct seq_file *seqf, void *v) +{ + struct gendisk *gp = v; + struct disk_part_iter piter; + struct hd_struct *hd; + char buf[BDEVNAME_SIZE]; + int cpu; + + /* + if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next) + seq_puts(seqf, "major minor name" + " rio rmerge rsect ruse wio wmerge " + "wsect wuse running use aveq" + "\n\n"); + */ + + disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); + while ((hd = disk_part_iter_next(&piter))) { + cpu = part_stat_lock(); + part_round_stats(cpu, hd); + part_stat_unlock(); + seq_printf(seqf, "%4d %7d %s %lu %lu %lu " + "%u %lu %lu %lu %u %u %u %u\n", + MAJOR(part_devt(hd)), MINOR(part_devt(hd)), + disk_name(gp, hd->partno, buf), + part_stat_read(hd, ios[READ]), + part_stat_read(hd, merges[READ]), + part_stat_read(hd, sectors[READ]), + jiffies_to_msecs(part_stat_read(hd, ticks[READ])), + part_stat_read(hd, ios[WRITE]), + part_stat_read(hd, merges[WRITE]), + part_stat_read(hd, sectors[WRITE]), + jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), + part_in_flight(hd), + jiffies_to_msecs(part_stat_read(hd, io_ticks)), + jiffies_to_msecs(part_stat_read(hd, time_in_queue)) + ); + } + disk_part_iter_exit(&piter); + + return 0; +} + +static const struct seq_operations diskstats_op = { + .start = disk_seqf_start, + .next = disk_seqf_next, + .stop = disk_seqf_stop, + .show = diskstats_show +}; + +static int diskstats_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &diskstats_op); +} + +static const struct file_operations proc_diskstats_operations = { + .open = diskstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_genhd_init(void) +{ + proc_create("diskstats", 0, NULL, &proc_diskstats_operations); + proc_create("partitions", 0, NULL, &proc_partitions_operations); + return 0; +} +module_init(proc_genhd_init); +#endif /* CONFIG_PROC_FS */ + +dev_t blk_lookup_devt(const char *name, int partno) +{ + dev_t devt = MKDEV(0, 0); + struct class_dev_iter iter; + struct device *dev; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while ((dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct hd_struct *part; + + if (strcmp(dev_name(dev), name)) + continue; + + if (partno < disk->minors) { + /* We need to return the right devno, even + * if the partition doesn't exist yet. + */ + devt = MKDEV(MAJOR(dev->devt), + MINOR(dev->devt) + partno); + break; + } + part = disk_get_part(disk, partno); + if (part) { + devt = part_devt(part); + disk_put_part(part); + break; + } + disk_put_part(part); + } + class_dev_iter_exit(&iter); + return devt; +} +EXPORT_SYMBOL(blk_lookup_devt); + +struct gendisk *alloc_disk(int minors) +{ + return alloc_disk_node(minors, NUMA_NO_NODE); +} +EXPORT_SYMBOL(alloc_disk); + +struct gendisk *alloc_disk_node(int minors, int node_id) +{ + struct gendisk *disk; + + disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); + if (disk) { + if (!init_part_stats(&disk->part0)) { + kfree(disk); + return NULL; + } + disk->node_id = node_id; + if (disk_expand_part_tbl(disk, 0)) { + free_part_stats(&disk->part0); + kfree(disk); + return NULL; + } + disk->part_tbl->part[0] = &disk->part0; + + /* + * set_capacity() and get_capacity() currently don't use + * seqcounter to read/update the part0->nr_sects. Still init + * the counter as we can read the sectors in IO submission + * patch using seqence counters. 
+ * + * TODO: Ideally set_capacity() and get_capacity() should be + * converted to make use of bd_mutex and sequence counters. + */ + seqcount_init(&disk->part0.nr_sects_seq); + hd_ref_init(&disk->part0); + + disk->minors = minors; + rand_initialize_disk(disk); + disk_to_dev(disk)->class = &block_class; + disk_to_dev(disk)->type = &disk_type; + device_initialize(disk_to_dev(disk)); + } + return disk; +} +EXPORT_SYMBOL(alloc_disk_node); + +struct kobject *get_disk(struct gendisk *disk) +{ + struct module *owner; + struct kobject *kobj; + + if (!disk->fops) + return NULL; + owner = disk->fops->owner; + if (owner && !try_module_get(owner)) + return NULL; + kobj = kobject_get(&disk_to_dev(disk)->kobj); + if (kobj == NULL) { + module_put(owner); + return NULL; + } + return kobj; + +} + +EXPORT_SYMBOL(get_disk); + +void put_disk(struct gendisk *disk) +{ + if (disk) + kobject_put(&disk_to_dev(disk)->kobj); +} + +EXPORT_SYMBOL(put_disk); + +static void set_disk_ro_uevent(struct gendisk *gd, int ro) +{ + char event[] = "DISK_RO=1"; + char *envp[] = { event, NULL }; + + if (!ro) + event[8] = '0'; + kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp); +} + +void set_device_ro(struct block_device *bdev, int flag) +{ + bdev->bd_part->policy = flag; +} + +EXPORT_SYMBOL(set_device_ro); + +void set_disk_ro(struct gendisk *disk, int flag) +{ + struct disk_part_iter piter; + struct hd_struct *part; + + if (disk->part0.policy != flag) { + set_disk_ro_uevent(disk, flag); + disk->part0.policy = flag; + } + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) + part->policy = flag; + disk_part_iter_exit(&piter); +} + +EXPORT_SYMBOL(set_disk_ro); + +int bdev_read_only(struct block_device *bdev) +{ + if (!bdev) + return 0; + return bdev->bd_part->policy; +} + +EXPORT_SYMBOL(bdev_read_only); + +int invalidate_partition(struct gendisk *disk, int partno) +{ + int res = 0; + struct block_device *bdev = bdget_disk(disk, partno); + if (bdev) { + fsync_bdev(bdev); + res = __invalidate_device(bdev, true); + bdput(bdev); + } + return res; +} + +EXPORT_SYMBOL(invalidate_partition); + +dev_t blk_lookup_fs_info(struct fs_info *seek) +{ + dev_t devt = MKDEV(0, 0); + struct class_dev_iter iter; + struct device *dev; + int best_score = 0; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while (best_score < 3 && (dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct disk_part_iter piter; + struct hd_struct *part; + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); + + while (best_score < 3 && (part = disk_part_iter_next(&piter))) { + int score = part_matches_fs_info(part, seek); + if (score > best_score) { + devt = part_devt(part); + best_score = score; + } + } + disk_part_iter_exit(&piter); + } + class_dev_iter_exit(&iter); + return devt; +} + +/* Caller uses NULL, key to start. For each match found, we return a bdev on + * which we have done blkdev_get, and we do the blkdev_put on block devices + * that are passed to us. When no more matches are found, we return NULL. 
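The per-partition policy flag that set_device_ro(), set_disk_ro() and bdev_read_only() above maintain is what the BLKROGET and BLKROSET ioctls, handled further down in block/ioctl.c, expose to user space. A minimal query, where /dev/sda is only an example path:

    /* ro_query.c - sketch: read a block device's read-only policy flag
     * via BLKROGET. */
    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *dev = argc > 1 ? argv[1] : "/dev/sda";
        int fd = open(dev, O_RDONLY);
        int ro;

        if (fd < 0) {
            perror(dev);
            return 1;
        }
        if (ioctl(fd, BLKROGET, &ro) < 0) {
            perror("BLKROGET");
            close(fd);
            return 1;
        }
        printf("%s is %s\n", dev, ro ? "read-only" : "read-write");
        close(fd);
        return 0;
    }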
+ */ +struct block_device *next_bdev_of_type(struct block_device *last, + const char *key) +{ + dev_t devt = MKDEV(0, 0); + struct class_dev_iter iter; + struct device *dev; + struct block_device *next = NULL, *bdev; + int got_last = 0; + + if (!key) + goto out; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while (!devt && (dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct disk_part_iter piter; + struct hd_struct *part; + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); + + while ((part = disk_part_iter_next(&piter))) { + bdev = bdget(part_devt(part)); + if (last && !got_last) { + if (last == bdev) + got_last = 1; + continue; + } + + if (blkdev_get(bdev, FMODE_READ, 0)) + continue; + + if (bdev_matches_key(bdev, key)) { + next = bdev; + break; + } + + blkdev_put(bdev, FMODE_READ); + } + disk_part_iter_exit(&piter); + } + class_dev_iter_exit(&iter); +out: + if (last) + blkdev_put(last, FMODE_READ); + return next; +} + +/* + * Disk events - monitor disk events like media change and eject request. + */ +struct disk_events { + struct list_head node; /* all disk_event's */ + struct gendisk *disk; /* the associated disk */ + spinlock_t lock; + + struct mutex block_mutex; /* protects blocking */ + int block; /* event blocking depth */ + unsigned int pending; /* events already sent out */ + unsigned int clearing; /* events being cleared */ + + long poll_msecs; /* interval, -1 for default */ + struct delayed_work dwork; +}; + +static const char *disk_events_strs[] = { + [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change", + [ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request", +}; + +static char *disk_uevents[] = { + [ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1", + [ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1", +}; + +/* list of all disk_events */ +static DEFINE_MUTEX(disk_events_mutex); +static LIST_HEAD(disk_events); + +/* disable in-kernel polling by default */ +static unsigned long disk_events_dfl_poll_msecs = 0; + +static unsigned long disk_events_poll_jiffies(struct gendisk *disk) +{ + struct disk_events *ev = disk->ev; + long intv_msecs = 0; + + /* + * If device-specific poll interval is set, always use it. If + * the default is being used, poll iff there are events which + * can't be monitored asynchronously. + */ + if (ev->poll_msecs >= 0) + intv_msecs = ev->poll_msecs; + else if (disk->events & ~disk->async_events) + intv_msecs = disk_events_dfl_poll_msecs; + + return msecs_to_jiffies(intv_msecs); +} + +/** + * disk_block_events - block and flush disk event checking + * @disk: disk to block events for + * + * On return from this function, it is guaranteed that event checking + * isn't in progress and won't happen until unblocked by + * disk_unblock_events(). Events blocking is counted and the actual + * unblocking happens after the matching number of unblocks are done. + * + * Note that this intentionally does not block event checking from + * disk_clear_events(). + * + * CONTEXT: + * Might sleep. + */ +void disk_block_events(struct gendisk *disk) +{ + struct disk_events *ev = disk->ev; + unsigned long flags; + bool cancel; + + if (!ev) + return; + + /* + * Outer mutex ensures that the first blocker completes canceling + * the event work before further blockers are allowed to finish. 
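The disk_events_strs[] and disk_uevents[] tables above use designated initializers indexed by ilog2() of the event flag, so looking up the name of a set bit is a direct array access. A stand-alone sketch of that lookup, where the two flag values mirror DISK_EVENT_MEDIA_CHANGE and DISK_EVENT_EJECT_REQUEST:

    /* event_names.c - sketch of the bit-indexed string table technique. */
    #include <stdio.h>

    #define EV_MEDIA_CHANGE   (1u << 0)    /* mirrors DISK_EVENT_MEDIA_CHANGE */
    #define EV_EJECT_REQUEST  (1u << 1)    /* mirrors DISK_EVENT_EJECT_REQUEST */

    static const char *event_names[] = {
        [0] = "media_change",              /* index = bit number = ilog2(flag) */
        [1] = "eject_request",
    };

    static void print_events(unsigned int events)
    {
        const char *delim = "";
        unsigned int i;

        for (i = 0; i < sizeof(event_names) / sizeof(event_names[0]); i++)
            if (events & (1u << i)) {
                printf("%s%s", delim, event_names[i]);
                delim = " ";
            }
        printf("\n");
    }

    int main(void)
    {
        print_events(EV_MEDIA_CHANGE | EV_EJECT_REQUEST);
        return 0;
    }

This is the same walk that __disk_events_show() and the uevent loop in disk_check_events() perform further down.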
+ */ + mutex_lock(&ev->block_mutex); + + spin_lock_irqsave(&ev->lock, flags); + cancel = !ev->block++; + spin_unlock_irqrestore(&ev->lock, flags); + + if (cancel) + cancel_delayed_work_sync(&disk->ev->dwork); + + mutex_unlock(&ev->block_mutex); +} + +static void __disk_unblock_events(struct gendisk *disk, bool check_now) +{ + struct disk_events *ev = disk->ev; + unsigned long intv; + unsigned long flags; + + spin_lock_irqsave(&ev->lock, flags); + + if (WARN_ON_ONCE(ev->block <= 0)) + goto out_unlock; + + if (--ev->block) + goto out_unlock; + + /* + * Not exactly a latency critical operation, set poll timer + * slack to 25% and kick event check. + */ + intv = disk_events_poll_jiffies(disk); + set_timer_slack(&ev->dwork.timer, intv / 4); + if (check_now) + queue_delayed_work(system_freezable_power_efficient_wq, + &ev->dwork, 0); + else if (intv) + queue_delayed_work(system_freezable_power_efficient_wq, + &ev->dwork, intv); +out_unlock: + spin_unlock_irqrestore(&ev->lock, flags); +} + +/** + * disk_unblock_events - unblock disk event checking + * @disk: disk to unblock events for + * + * Undo disk_block_events(). When the block count reaches zero, it + * starts events polling if configured. + * + * CONTEXT: + * Don't care. Safe to call from irq context. + */ +void disk_unblock_events(struct gendisk *disk) +{ + if (disk->ev) + __disk_unblock_events(disk, false); +} + +/** + * disk_flush_events - schedule immediate event checking and flushing + * @disk: disk to check and flush events for + * @mask: events to flush + * + * Schedule immediate event checking on @disk if not blocked. Events in + * @mask are scheduled to be cleared from the driver. Note that this + * doesn't clear the events from @disk->ev. + * + * CONTEXT: + * If @mask is non-zero must be called with bdev->bd_mutex held. + */ +void disk_flush_events(struct gendisk *disk, unsigned int mask) +{ + struct disk_events *ev = disk->ev; + + if (!ev) + return; + + spin_lock_irq(&ev->lock); + ev->clearing |= mask; + if (!ev->block) + mod_delayed_work(system_freezable_power_efficient_wq, + &ev->dwork, 0); + spin_unlock_irq(&ev->lock); +} + +/** + * disk_clear_events - synchronously check, clear and return pending events + * @disk: disk to fetch and clear events from + * @mask: mask of events to be fetched and cleared + * + * Disk events are synchronously checked and pending events in @mask + * are cleared and returned. This ignores the block count. + * + * CONTEXT: + * Might sleep. + */ +unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) +{ + const struct block_device_operations *bdops = disk->fops; + struct disk_events *ev = disk->ev; + unsigned int pending; + unsigned int clearing = mask; + + if (!ev) { + /* for drivers still using the old ->media_changed method */ + if ((mask & DISK_EVENT_MEDIA_CHANGE) && + bdops->media_changed && bdops->media_changed(disk)) + return DISK_EVENT_MEDIA_CHANGE; + return 0; + } + + disk_block_events(disk); + + /* + * store the union of mask and ev->clearing on the stack so that the + * race with disk_flush_events does not cause ambiguity (ev->clearing + * can still be modified even if events are blocked). + */ + spin_lock_irq(&ev->lock); + clearing |= ev->clearing; + ev->clearing = 0; + spin_unlock_irq(&ev->lock); + + disk_check_events(ev, &clearing); + /* + * if ev->clearing is not 0, the disk_flush_events got called in the + * middle of this function, so we want to run the workfn without delay. + */ + __disk_unblock_events(disk, ev->clearing ? 
true : false); + + /* then, fetch and clear pending events */ + spin_lock_irq(&ev->lock); + pending = ev->pending & mask; + ev->pending &= ~mask; + spin_unlock_irq(&ev->lock); + WARN_ON_ONCE(clearing & mask); + + return pending; +} + +/* + * Separate this part out so that a different pointer for clearing_ptr can be + * passed in for disk_clear_events. + */ +static void disk_events_workfn(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct disk_events *ev = container_of(dwork, struct disk_events, dwork); + + disk_check_events(ev, &ev->clearing); +} + +static void disk_check_events(struct disk_events *ev, + unsigned int *clearing_ptr) +{ + struct gendisk *disk = ev->disk; + char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; + unsigned int clearing = *clearing_ptr; + unsigned int events; + unsigned long intv; + int nr_events = 0, i; + + /* check events */ + events = disk->fops->check_events(disk, clearing); + + /* accumulate pending events and schedule next poll if necessary */ + spin_lock_irq(&ev->lock); + + events &= ~ev->pending; + ev->pending |= events; + *clearing_ptr &= ~clearing; + + intv = disk_events_poll_jiffies(disk); + if (!ev->block && intv) + queue_delayed_work(system_freezable_power_efficient_wq, + &ev->dwork, intv); + + spin_unlock_irq(&ev->lock); + + /* + * Tell userland about new events. Only the events listed in + * @disk->events are reported. Unlisted events are processed the + * same internally but never get reported to userland. + */ + for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) + if (events & disk->events & (1 << i)) + envp[nr_events++] = disk_uevents[i]; + + if (nr_events) + kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp); +} + +/* + * A disk events enabled device has the following sysfs nodes under + * its /sys/block/X/ directory. 
+ * + * events : list of all supported events + * events_async : list of events which can be detected w/o polling + * events_poll_msecs : polling interval, 0: disable, -1: system default + */ +static ssize_t __disk_events_show(unsigned int events, char *buf) +{ + const char *delim = ""; + ssize_t pos = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++) + if (events & (1 << i)) { + pos += sprintf(buf + pos, "%s%s", + delim, disk_events_strs[i]); + delim = " "; + } + if (pos) + pos += sprintf(buf + pos, "\n"); + return pos; +} + +static ssize_t disk_events_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return __disk_events_show(disk->events, buf); +} + +static ssize_t disk_events_async_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return __disk_events_show(disk->async_events, buf); +} + +static ssize_t disk_events_poll_msecs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%ld\n", disk->ev->poll_msecs); +} + +static ssize_t disk_events_poll_msecs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + long intv; + + if (!count || !sscanf(buf, "%ld", &intv)) + return -EINVAL; + + if (intv < 0 && intv != -1) + return -EINVAL; + + disk_block_events(disk); + disk->ev->poll_msecs = intv; + __disk_unblock_events(disk, true); + + return count; +} + +static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL); +static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL); +static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR, + disk_events_poll_msecs_show, + disk_events_poll_msecs_store); + +static const struct attribute *disk_events_attrs[] = { + &dev_attr_events.attr, + &dev_attr_events_async.attr, + &dev_attr_events_poll_msecs.attr, + NULL, +}; + +/* + * The default polling interval can be specified by the kernel + * parameter block.events_dfl_poll_msecs which defaults to 0 + * (disable). This can also be modified runtime by writing to + * /sys/module/block/events_dfl_poll_msecs. + */ +static int disk_events_set_dfl_poll_msecs(const char *val, + const struct kernel_param *kp) +{ + struct disk_events *ev; + int ret; + + ret = param_set_ulong(val, kp); + if (ret < 0) + return ret; + + mutex_lock(&disk_events_mutex); + + list_for_each_entry(ev, &disk_events, node) + disk_flush_events(ev->disk, 0); + + mutex_unlock(&disk_events_mutex); + + return 0; +} + +static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = { + .set = disk_events_set_dfl_poll_msecs, + .get = param_get_ulong, +}; + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "block." + +module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops, + &disk_events_dfl_poll_msecs, 0644); + +/* + * disk_{alloc|add|del|release}_events - initialize and destroy disk_events. 
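Once disk_add_events() registers them, the three attributes just described live under /sys/block/<disk>/. A user-space sketch that dumps them for one disk; the device name sr0 is only an example, and writing events_poll_msecs, as disk_events_poll_msecs_store() above accepts, additionally needs write permission on the file:

    /* disk_events_sysfs.c - sketch: dump the disk-events sysfs nodes. */
    #include <stdio.h>

    static void dump(const char *disk, const char *node)
    {
        char path[128], buf[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/%s/%s", disk, node);
        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("%-18s: %s", node, buf);    /* sysfs value ends in '\n' */
        fclose(f);
    }

    int main(void)
    {
        dump("sr0", "events");
        dump("sr0", "events_async");
        dump("sr0", "events_poll_msecs");
        return 0;
    }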
+ */ +static void disk_alloc_events(struct gendisk *disk) +{ + struct disk_events *ev; + + if (!disk->fops->check_events) + return; + + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) { + pr_warn("%s: failed to initialize events\n", disk->disk_name); + return; + } + + INIT_LIST_HEAD(&ev->node); + ev->disk = disk; + spin_lock_init(&ev->lock); + mutex_init(&ev->block_mutex); + ev->block = 1; + ev->poll_msecs = -1; + INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); + + disk->ev = ev; +} + +static void disk_add_events(struct gendisk *disk) +{ + if (!disk->ev) + return; + + /* FIXME: error handling */ + if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0) + pr_warn("%s: failed to create sysfs files for events\n", + disk->disk_name); + + mutex_lock(&disk_events_mutex); + list_add_tail(&disk->ev->node, &disk_events); + mutex_unlock(&disk_events_mutex); + + /* + * Block count is initialized to 1 and the following initial + * unblock kicks it into action. + */ + __disk_unblock_events(disk, true); +} + +static void disk_del_events(struct gendisk *disk) +{ + if (!disk->ev) + return; + + disk_block_events(disk); + + mutex_lock(&disk_events_mutex); + list_del_init(&disk->ev->node); + mutex_unlock(&disk_events_mutex); + + sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs); +} + +static void disk_release_events(struct gendisk *disk) +{ + /* the block count should be 1 from disk_del_events() */ + WARN_ON_ONCE(disk->ev && disk->ev->block != 1); + kfree(disk->ev); +} diff --git a/block/ioctl.c b/block/ioctl.c new file mode 100644 index 000000000..7d8befde2 --- /dev/null +++ b/block/ioctl.c @@ -0,0 +1,430 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg) +{ + struct block_device *bdevp; + struct gendisk *disk; + struct hd_struct *part, *lpart; + struct blkpg_ioctl_arg a; + struct blkpg_partition p; + struct disk_part_iter piter; + long long start, length; + int partno; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) + return -EFAULT; + if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) + return -EFAULT; + disk = bdev->bd_disk; + if (bdev != bdev->bd_contains) + return -EINVAL; + partno = p.pno; + if (partno <= 0) + return -EINVAL; + switch (a.op) { + case BLKPG_ADD_PARTITION: + start = p.start >> 9; + length = p.length >> 9; + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && + sizeof(long long) > sizeof(long)) { + long pstart = start, plength = length; + if (pstart != start || plength != length + || pstart < 0 || plength < 0 || partno > 65535) + return -EINVAL; + } + + mutex_lock(&bdev->bd_mutex); + + /* overlap? 
*/ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) { + if (!(start + length <= part->start_sect || + start >= part->start_sect + part->nr_sects)) { + disk_part_iter_exit(&piter); + mutex_unlock(&bdev->bd_mutex); + return -EBUSY; + } + } + disk_part_iter_exit(&piter); + + /* all seems OK */ + part = add_partition(disk, partno, start, length, + ADDPART_FLAG_NONE, NULL); + mutex_unlock(&bdev->bd_mutex); + return PTR_ERR_OR_ZERO(part); + case BLKPG_DEL_PARTITION: + part = disk_get_part(disk, partno); + if (!part) + return -ENXIO; + + bdevp = bdget(part_devt(part)); + disk_put_part(part); + if (!bdevp) + return -ENOMEM; + + mutex_lock(&bdevp->bd_mutex); + if (bdevp->bd_openers) { + mutex_unlock(&bdevp->bd_mutex); + bdput(bdevp); + return -EBUSY; + } + /* all seems OK */ + fsync_bdev(bdevp); + invalidate_bdev(bdevp); + + mutex_lock_nested(&bdev->bd_mutex, 1); + delete_partition(disk, partno); + mutex_unlock(&bdev->bd_mutex); + mutex_unlock(&bdevp->bd_mutex); + bdput(bdevp); + + return 0; + case BLKPG_RESIZE_PARTITION: + start = p.start >> 9; + /* new length of partition in bytes */ + length = p.length >> 9; + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && + sizeof(long long) > sizeof(long)) { + long pstart = start, plength = length; + if (pstart != start || plength != length + || pstart < 0 || plength < 0) + return -EINVAL; + } + part = disk_get_part(disk, partno); + if (!part) + return -ENXIO; + bdevp = bdget(part_devt(part)); + if (!bdevp) { + disk_put_part(part); + return -ENOMEM; + } + mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); + if (start != part->start_sect) { + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EINVAL; + } + /* overlap? 
*/ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY); + while ((lpart = disk_part_iter_next(&piter))) { + if (lpart->partno != partno && + !(start + length <= lpart->start_sect || + start >= lpart->start_sect + lpart->nr_sects) + ) { + disk_part_iter_exit(&piter); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EBUSY; + } + } + disk_part_iter_exit(&piter); + part_nr_sects_write(part, (sector_t)length); + i_size_write(bdevp->bd_inode, p.length); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return 0; + default: + return -EINVAL; + } +} + +static int blkdev_reread_part(struct block_device *bdev) +{ + struct gendisk *disk = bdev->bd_disk; + int res; + + if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!mutex_trylock(&bdev->bd_mutex)) + return -EBUSY; + res = rescan_partitions(disk, bdev); + mutex_unlock(&bdev->bd_mutex); + return res; +} + +static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, + uint64_t len, int secure) +{ + unsigned long flags = 0; + + if (start & 511) + return -EINVAL; + if (len & 511) + return -EINVAL; + start >>= 9; + len >>= 9; + + if (start + len > (i_size_read(bdev->bd_inode) >> 9)) + return -EINVAL; + if (secure) + flags |= BLKDEV_DISCARD_SECURE; + return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags); +} + +static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start, + uint64_t len) +{ + if (start & 511) + return -EINVAL; + if (len & 511) + return -EINVAL; + start >>= 9; + len >>= 9; + + if (start + len > (i_size_read(bdev->bd_inode) >> 9)) + return -EINVAL; + + return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false); +} + +static int put_ushort(unsigned long arg, unsigned short val) +{ + return put_user(val, (unsigned short __user *)arg); +} + +static int put_int(unsigned long arg, int val) +{ + return put_user(val, (int __user *)arg); +} + +static int put_uint(unsigned long arg, unsigned int val) +{ + return put_user(val, (unsigned int __user *)arg); +} + +static int put_long(unsigned long arg, long val) +{ + return put_user(val, (long __user *)arg); +} + +static int put_ulong(unsigned long arg, unsigned long val) +{ + return put_user(val, (unsigned long __user *)arg); +} + +static int put_u64(unsigned long arg, u64 val) +{ + return put_user(val, (u64 __user *)arg); +} + +int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + + if (disk->fops->ioctl) + return disk->fops->ioctl(bdev, mode, cmd, arg); + + return -ENOTTY; +} +/* + * For the record: _GPL here is only because somebody decided to slap it + * on the previous export. Sheer idiocy, since it wasn't copyrightable + * at all and could be open-coded without any exports by anybody who cares. + */ +EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl); + +/* + * Is it an unrecognized ioctl? The correct returns are either + * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a + * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl + * code before returning. + * + * Confused drivers sometimes return EINVAL, which is wrong. It + * means "I understood the ioctl command, but the parameters to + * it were wrong". + * + * We should aim to just fix the broken drivers, the EINVAL case + * should go away. 
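blkpg_ioctl() above is reached through the BLKPG ioctl, dispatched from blkdev_ioctl() below: user space passes a struct blkpg_ioctl_arg whose data pointer refers to a struct blkpg_partition with byte-granular start and length, which the kernel shifts down to sectors before the overlap checks. A hedged sketch of adding a partition; the device path, partition number and geometry are placeholders, and CAP_SYS_ADMIN is required:

    /* blkpg_add.c - sketch of BLKPG_ADD_PARTITION using the uapi structs. */
    #include <fcntl.h>
    #include <linux/blkpg.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        struct blkpg_partition part;
        struct blkpg_ioctl_arg arg;
        int fd = open("/dev/sdb", O_RDONLY);    /* example whole-disk node */

        if (fd < 0) {
            perror("/dev/sdb");
            return 1;
        }

        memset(&part, 0, sizeof(part));
        part.pno    = 1;                        /* partition number to create */
        part.start  = 1024 * 1024;              /* byte offset, >>9 in the kernel */
        part.length = 64LL * 1024 * 1024;       /* byte length */

        memset(&arg, 0, sizeof(arg));
        arg.op      = BLKPG_ADD_PARTITION;
        arg.datalen = sizeof(part);
        arg.data    = &part;

        if (ioctl(fd, BLKPG, &arg) < 0)
            perror("BLKPG_ADD_PARTITION");
        else
            printf("partition %d added\n", part.pno);

        close(fd);
        return 0;
    }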
+ */ +static inline int is_unrecognized_ioctl(int ret) +{ + return ret == -EINVAL || + ret == -ENOTTY || + ret == -ENOIOCTLCMD; +} + +/* + * always keep this in sync with compat_blkdev_ioctl() + */ +int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + struct backing_dev_info *bdi; + loff_t size; + int ret, n; + unsigned int max_sectors; + + switch(cmd) { + case BLKFLSBUF: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + if (!is_unrecognized_ioctl(ret)) + return ret; + + fsync_bdev(bdev); + invalidate_bdev(bdev); + return 0; + + case BLKROSET: + ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + if (!is_unrecognized_ioctl(ret)) + return ret; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (get_user(n, (int __user *)(arg))) + return -EFAULT; + set_device_ro(bdev, n); + return 0; + + case BLKDISCARD: + case BLKSECDISCARD: { + uint64_t range[2]; + + if (!(mode & FMODE_WRITE)) + return -EBADF; + + if (copy_from_user(range, (void __user *)arg, sizeof(range))) + return -EFAULT; + + return blk_ioctl_discard(bdev, range[0], range[1], + cmd == BLKSECDISCARD); + } + case BLKZEROOUT: { + uint64_t range[2]; + + if (!(mode & FMODE_WRITE)) + return -EBADF; + + if (copy_from_user(range, (void __user *)arg, sizeof(range))) + return -EFAULT; + + return blk_ioctl_zeroout(bdev, range[0], range[1]); + } + + case HDIO_GETGEO: { + struct hd_geometry geo; + + if (!arg) + return -EINVAL; + if (!disk->fops->getgeo) + return -ENOTTY; + + /* + * We need to set the startsect first, the driver may + * want to override it. + */ + memset(&geo, 0, sizeof(geo)); + geo.start = get_start_sect(bdev); + ret = disk->fops->getgeo(bdev, &geo); + if (ret) + return ret; + if (copy_to_user((struct hd_geometry __user *)arg, &geo, + sizeof(geo))) + return -EFAULT; + return 0; + } + case BLKRAGET: + case BLKFRAGET: + if (!arg) + return -EINVAL; + bdi = blk_get_backing_dev_info(bdev); + return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); + case BLKROGET: + return put_int(arg, bdev_read_only(bdev) != 0); + case BLKBSZGET: /* get block device soft block size (cf. 
BLKSSZGET) */ + return put_int(arg, block_size(bdev)); + case BLKSSZGET: /* get block device logical block size */ + return put_int(arg, bdev_logical_block_size(bdev)); + case BLKPBSZGET: /* get block device physical block size */ + return put_uint(arg, bdev_physical_block_size(bdev)); + case BLKIOMIN: + return put_uint(arg, bdev_io_min(bdev)); + case BLKIOOPT: + return put_uint(arg, bdev_io_opt(bdev)); + case BLKALIGNOFF: + return put_int(arg, bdev_alignment_offset(bdev)); + case BLKDISCARDZEROES: + return put_uint(arg, bdev_discard_zeroes_data(bdev)); + case BLKSECTGET: + max_sectors = min_t(unsigned int, USHRT_MAX, + queue_max_sectors(bdev_get_queue(bdev))); + return put_ushort(arg, max_sectors); + case BLKROTATIONAL: + return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev))); + case BLKRASET: + case BLKFRASET: + if(!capable(CAP_SYS_ADMIN)) + return -EACCES; + bdi = blk_get_backing_dev_info(bdev); + bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; + return 0; + case BLKBSZSET: + /* set the logical block size */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!arg) + return -EINVAL; + if (get_user(n, (int __user *) arg)) + return -EFAULT; + if (!(mode & FMODE_EXCL)) { + bdgrab(bdev); + if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) + return -EBUSY; + } + ret = set_blocksize(bdev, n); + if (!(mode & FMODE_EXCL)) + blkdev_put(bdev, mode | FMODE_EXCL); + return ret; + case BLKPG: + ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg); + break; + case BLKRRPART: + ret = blkdev_reread_part(bdev); + break; + case BLKGETSIZE: + size = i_size_read(bdev->bd_inode); + if ((size >> 9) > ~0UL) + return -EFBIG; + return put_ulong(arg, size >> 9); + case BLKGETSIZE64: + return put_u64(arg, i_size_read(bdev->bd_inode)); + case BLKTRACESTART: + case BLKTRACESTOP: + case BLKTRACESETUP: + case BLKTRACETEARDOWN: + ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg); + break; + default: + ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + } + return ret; +} +EXPORT_SYMBOL_GPL(blkdev_ioctl); diff --git a/block/ioprio.c b/block/ioprio.c new file mode 100644 index 000000000..31666c92b --- /dev/null +++ b/block/ioprio.c @@ -0,0 +1,243 @@ +/* + * fs/ioprio.c + * + * Copyright (C) 2004 Jens Axboe + * + * Helper functions for setting/querying io priorities of processes. The + * system calls closely mimmick getpriority/setpriority, see the man page for + * those. The prio argument is a composite of prio class and prio data, where + * the data argument has meaning within that class. The standard scheduling + * classes have 8 distinct prio levels, with 0 being the highest prio and 7 + * being the lowest. 
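To make the encoding concrete (the comment's own ioprio_set() call continues just below): the class sits above IOPRIO_CLASS_SHIFT and the priority level occupies the low bits. glibc provides no wrapper for ioprio_set()/ioprio_get(), so a user-space sketch goes through syscall(2); the constants are copied here for illustration and mirror the kernel's ioprio definitions:

    /* ioprio_demo.c - sketch: put the calling process into best-effort
     * class at priority level 2 via the raw syscall. */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define IOPRIO_CLASS_SHIFT  13
    #define IOPRIO_CLASS_BE     2
    #define IOPRIO_WHO_PROCESS  1
    #define IOPRIO_PRIO_VALUE(cls, data) (((cls) << IOPRIO_CLASS_SHIFT) | (data))

    int main(void)
    {
        int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

        /* who == 0 means "the calling process", as handled in ioprio_set() */
        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
            perror("ioprio_set");
            return 1;
        }
        printf("ioprio now 0x%x (class %d, level %d)\n",
               prio, prio >> IOPRIO_CLASS_SHIFT,
               prio & ((1 << IOPRIO_CLASS_SHIFT) - 1));
        return 0;
    }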
+ * + * IOW, setting BE scheduling class with prio 2 is done ala: + * + * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2; + * + * ioprio_set(PRIO_PROCESS, pid, prio); + * + * See also Documentation/block/ioprio.txt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int set_task_ioprio(struct task_struct *task, int ioprio) +{ + int err; + struct io_context *ioc; + const struct cred *cred = current_cred(), *tcred; + + rcu_read_lock(); + tcred = __task_cred(task); + if (!uid_eq(tcred->uid, cred->euid) && + !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + return -EPERM; + } + rcu_read_unlock(); + + err = security_task_setioprio(task, ioprio); + if (err) + return err; + + ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); + if (ioc) { + ioc->ioprio = ioprio; + put_io_context(ioc); + } + + return err; +} +EXPORT_SYMBOL_GPL(set_task_ioprio); + +SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) +{ + int class = IOPRIO_PRIO_CLASS(ioprio); + int data = IOPRIO_PRIO_DATA(ioprio); + struct task_struct *p, *g; + struct user_struct *user; + struct pid *pgrp; + kuid_t uid; + int ret; + + switch (class) { + case IOPRIO_CLASS_RT: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + /* fall through, rt has prio field too */ + case IOPRIO_CLASS_BE: + if (data >= IOPRIO_BE_NR || data < 0) + return -EINVAL; + + break; + case IOPRIO_CLASS_IDLE: + break; + case IOPRIO_CLASS_NONE: + if (data) + return -EINVAL; + break; + default: + return -EINVAL; + } + + ret = -ESRCH; + rcu_read_lock(); + switch (which) { + case IOPRIO_WHO_PROCESS: + if (!who) + p = current; + else + p = find_task_by_vpid(who); + if (p) + ret = set_task_ioprio(p, ioprio); + break; + case IOPRIO_WHO_PGRP: + if (!who) + pgrp = task_pgrp(current); + else + pgrp = find_vpid(who); + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + ret = set_task_ioprio(p, ioprio); + if (ret) + break; + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + break; + case IOPRIO_WHO_USER: + uid = make_kuid(current_user_ns(), who); + if (!uid_valid(uid)) + break; + if (!who) + user = current_user(); + else + user = find_user(uid); + + if (!user) + break; + + do_each_thread(g, p) { + if (!uid_eq(task_uid(p), uid)) + continue; + ret = set_task_ioprio(p, ioprio); + if (ret) + goto free_uid; + } while_each_thread(g, p); +free_uid: + if (who) + free_uid(user); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock(); + return ret; +} + +static int get_task_ioprio(struct task_struct *p) +{ + int ret; + + ret = security_task_getioprio(p); + if (ret) + goto out; + ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); + if (p->io_context) + ret = p->io_context->ioprio; +out: + return ret; +} + +int ioprio_best(unsigned short aprio, unsigned short bprio) +{ + unsigned short aclass; + unsigned short bclass; + + if (!ioprio_valid(aprio)) + aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); + if (!ioprio_valid(bprio)) + bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); + + aclass = IOPRIO_PRIO_CLASS(aprio); + bclass = IOPRIO_PRIO_CLASS(bprio); + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) + return bprio; + else + return aprio; +} + +SYSCALL_DEFINE2(ioprio_get, int, which, int, who) +{ + struct task_struct *g, *p; + struct user_struct *user; + struct pid *pgrp; + kuid_t uid; + int ret = -ESRCH; + int tmpio; + + rcu_read_lock(); + switch (which) { + case IOPRIO_WHO_PROCESS: + if (!who) + p = current; + else + p = 
find_task_by_vpid(who); + if (p) + ret = get_task_ioprio(p); + break; + case IOPRIO_WHO_PGRP: + if (!who) + pgrp = task_pgrp(current); + else + pgrp = find_vpid(who); + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + tmpio = get_task_ioprio(p); + if (tmpio < 0) + continue; + if (ret == -ESRCH) + ret = tmpio; + else + ret = ioprio_best(ret, tmpio); + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + break; + case IOPRIO_WHO_USER: + uid = make_kuid(current_user_ns(), who); + if (!who) + user = current_user(); + else + user = find_user(uid); + + if (!user) + break; + + do_each_thread(g, p) { + if (!uid_eq(task_uid(p), user->uid)) + continue; + tmpio = get_task_ioprio(p); + if (tmpio < 0) + continue; + if (ret == -ESRCH) + ret = tmpio; + else + ret = ioprio_best(ret, tmpio); + } while_each_thread(g, p); + + if (who) + free_uid(user); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock(); + return ret; +} diff --git a/block/noop-iosched.c b/block/noop-iosched.c new file mode 100644 index 000000000..3de89d469 --- /dev/null +++ b/block/noop-iosched.c @@ -0,0 +1,124 @@ +/* + * elevator noop + */ +#include +#include +#include +#include +#include +#include + +struct noop_data { + struct list_head queue; +}; + +static void noop_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + list_del_init(&next->queuelist); +} + +static int noop_dispatch(struct request_queue *q, int force) +{ + struct noop_data *nd = q->elevator->elevator_data; + + if (!list_empty(&nd->queue)) { + struct request *rq; + rq = list_entry(nd->queue.next, struct request, queuelist); + list_del_init(&rq->queuelist); + elv_dispatch_sort(q, rq); + return 1; + } + return 0; +} + +static void noop_add_request(struct request_queue *q, struct request *rq) +{ + struct noop_data *nd = q->elevator->elevator_data; + + list_add_tail(&rq->queuelist, &nd->queue); +} + +static struct request * +noop_former_request(struct request_queue *q, struct request *rq) +{ + struct noop_data *nd = q->elevator->elevator_data; + + if (rq->queuelist.prev == &nd->queue) + return NULL; + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +noop_latter_request(struct request_queue *q, struct request *rq) +{ + struct noop_data *nd = q->elevator->elevator_data; + + if (rq->queuelist.next == &nd->queue) + return NULL; + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static int noop_init_queue(struct request_queue *q, struct elevator_type *e) +{ + struct noop_data *nd; + struct elevator_queue *eq; + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node); + if (!nd) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + eq->elevator_data = nd; + + INIT_LIST_HEAD(&nd->queue); + + spin_lock_irq(q->queue_lock); + q->elevator = eq; + spin_unlock_irq(q->queue_lock); + return 0; +} + +static void noop_exit_queue(struct elevator_queue *e) +{ + struct noop_data *nd = e->elevator_data; + + BUG_ON(!list_empty(&nd->queue)); + kfree(nd); +} + +static struct elevator_type elevator_noop = { + .ops = { + .elevator_merge_req_fn = noop_merged_requests, + .elevator_dispatch_fn = noop_dispatch, + .elevator_add_req_fn = noop_add_request, + .elevator_former_req_fn = noop_former_request, + .elevator_latter_req_fn = noop_latter_request, + .elevator_init_fn = noop_init_queue, + .elevator_exit_fn = noop_exit_queue, + }, + .elevator_name = "noop", + .elevator_owner = THIS_MODULE, +}; + +static int __init noop_init(void) +{ + 
return elv_register(&elevator_noop); +} + +static void __exit noop_exit(void) +{ + elv_unregister(&elevator_noop); +} + +module_init(noop_init); +module_exit(noop_exit); + + +MODULE_AUTHOR("Jens Axboe"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("No-op IO scheduler"); diff --git a/block/partition-generic.c b/block/partition-generic.c new file mode 100644 index 000000000..0d9e5f97f --- /dev/null +++ b/block/partition-generic.c @@ -0,0 +1,571 @@ +/* + * Code extracted from drivers/block/genhd.c + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + * + * We now have independent partition support from the + * block drivers, which allows all the partition code to + * be grouped in one location, and it to be mostly self + * contained. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "partitions/check.h" + +#ifdef CONFIG_BLK_DEV_MD +extern void md_autodetect_dev(dev_t dev); +#endif + +/* + * disk_name() is used by partition check code and the genhd driver. + * It formats the devicename of the indicated disk into + * the supplied buffer (of size at least 32), and returns + * a pointer to that same buffer (for convenience). + */ + +char *disk_name(struct gendisk *hd, int partno, char *buf) +{ + if (!partno) + snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name); + else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) + snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno); + else + snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno); + + return buf; +} + +const char *bdevname(struct block_device *bdev, char *buf) +{ + return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf); +} + +EXPORT_SYMBOL(bdevname); + +/* + * There's very little reason to use this, you should really + * have a struct block_device just about everywhere and use + * bdevname() instead. + */ +const char *__bdevname(dev_t dev, char *buffer) +{ + scnprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)", + MAJOR(dev), MINOR(dev)); + return buffer; +} + +EXPORT_SYMBOL(__bdevname); + +static ssize_t part_partition_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + + return sprintf(buf, "%d\n", p->partno); +} + +static ssize_t part_start_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + + return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect); +} + +ssize_t part_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p)); +} + +static ssize_t part_ro_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%d\n", p->policy ? 
1 : 0); +} + +static ssize_t part_alignment_offset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset); +} + +static ssize_t part_discard_alignment_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%u\n", p->discard_alignment); +} + +ssize_t part_stat_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + int cpu; + + cpu = part_stat_lock(); + part_round_stats(cpu, p); + part_stat_unlock(); + return sprintf(buf, + "%8lu %8lu %8llu %8u " + "%8lu %8lu %8llu %8u " + "%8u %8u %8u" + "\n", + part_stat_read(p, ios[READ]), + part_stat_read(p, merges[READ]), + (unsigned long long)part_stat_read(p, sectors[READ]), + jiffies_to_msecs(part_stat_read(p, ticks[READ])), + part_stat_read(p, ios[WRITE]), + part_stat_read(p, merges[WRITE]), + (unsigned long long)part_stat_read(p, sectors[WRITE]), + jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), + part_in_flight(p), + jiffies_to_msecs(part_stat_read(p, io_ticks)), + jiffies_to_msecs(part_stat_read(p, time_in_queue))); +} + +ssize_t part_inflight_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + + return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]), + atomic_read(&p->in_flight[1])); +} + +#ifdef CONFIG_FAIL_MAKE_REQUEST +ssize_t part_fail_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + + return sprintf(buf, "%d\n", p->make_it_fail); +} + +ssize_t part_fail_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hd_struct *p = dev_to_part(dev); + int i; + + if (count > 0 && sscanf(buf, "%d", &i) > 0) + p->make_it_fail = (i == 0) ? 
0 : 1; + + return count; +} +#endif + +static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); +static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); +static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL); +static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); +static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, + NULL); +static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); +static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); +#ifdef CONFIG_FAIL_MAKE_REQUEST +static struct device_attribute dev_attr_fail = + __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); +#endif + +static struct attribute *part_attrs[] = { + &dev_attr_partition.attr, + &dev_attr_start.attr, + &dev_attr_size.attr, + &dev_attr_ro.attr, + &dev_attr_alignment_offset.attr, + &dev_attr_discard_alignment.attr, + &dev_attr_stat.attr, + &dev_attr_inflight.attr, +#ifdef CONFIG_FAIL_MAKE_REQUEST + &dev_attr_fail.attr, +#endif + NULL +}; + +static struct attribute_group part_attr_group = { + .attrs = part_attrs, +}; + +static const struct attribute_group *part_attr_groups[] = { + &part_attr_group, +#ifdef CONFIG_BLK_DEV_IO_TRACE + &blk_trace_attr_group, +#endif + NULL +}; + +static void part_release(struct device *dev) +{ + struct hd_struct *p = dev_to_part(dev); + blk_free_devt(dev->devt); + free_part_stats(p); + free_part_info(p); + kfree(p); +} + +struct device_type part_type = { + .name = "partition", + .groups = part_attr_groups, + .release = part_release, +}; + +static void delete_partition_rcu_cb(struct rcu_head *head) +{ + struct hd_struct *part = container_of(head, struct hd_struct, rcu_head); + + part->start_sect = 0; + part->nr_sects = 0; + part_stat_set_all(part, 0); + put_device(part_to_dev(part)); +} + +void __delete_partition(struct hd_struct *part) +{ + call_rcu(&part->rcu_head, delete_partition_rcu_cb); +} + +void delete_partition(struct gendisk *disk, int partno) +{ + struct disk_part_tbl *ptbl = disk->part_tbl; + struct hd_struct *part; + + if (partno >= ptbl->len) + return; + + part = ptbl->part[partno]; + if (!part) + return; + + rcu_assign_pointer(ptbl->part[partno], NULL); + rcu_assign_pointer(ptbl->last_lookup, NULL); + kobject_put(part->holder_dir); + device_del(part_to_dev(part)); + + hd_struct_put(part); +} + +static ssize_t whole_disk_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return 0; +} +static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, + whole_disk_show, NULL); + +struct hd_struct *add_partition(struct gendisk *disk, int partno, + sector_t start, sector_t len, int flags, + struct partition_meta_info *info) +{ + struct hd_struct *p; + dev_t devt = MKDEV(0, 0); + struct device *ddev = disk_to_dev(disk); + struct device *pdev; + struct disk_part_tbl *ptbl; + const char *dname; + int err; + + err = disk_expand_part_tbl(disk, partno); + if (err) + return ERR_PTR(err); + ptbl = disk->part_tbl; + + if (ptbl->part[partno]) + return ERR_PTR(-EBUSY); + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-EBUSY); + + if (!init_part_stats(p)) { + err = -ENOMEM; + goto out_free; + } + + seqcount_init(&p->nr_sects_seq); + pdev = part_to_dev(p); + + p->start_sect = start; + p->alignment_offset = + queue_limit_alignment_offset(&disk->queue->limits, start); + p->discard_alignment = + queue_limit_discard_alignment(&disk->queue->limits, start); + p->nr_sects = len; + p->partno = partno; + p->policy = 
get_disk_ro(disk); + + if (info) { + struct partition_meta_info *pinfo = alloc_part_info(disk); + if (!pinfo) + goto out_free_stats; + memcpy(pinfo, info, sizeof(*info)); + p->info = pinfo; + } + + dname = dev_name(ddev); + if (isdigit(dname[strlen(dname) - 1])) + dev_set_name(pdev, "%sp%d", dname, partno); + else + dev_set_name(pdev, "%s%d", dname, partno); + + device_initialize(pdev); + pdev->class = &block_class; + pdev->type = &part_type; + pdev->parent = ddev; + + err = blk_alloc_devt(p, &devt); + if (err) + goto out_free_info; + pdev->devt = devt; + + /* delay uevent until 'holders' subdir is created */ + dev_set_uevent_suppress(pdev, 1); + err = device_add(pdev); + if (err) + goto out_put; + + err = -ENOMEM; + p->holder_dir = kobject_create_and_add("holders", &pdev->kobj); + if (!p->holder_dir) + goto out_del; + + dev_set_uevent_suppress(pdev, 0); + if (flags & ADDPART_FLAG_WHOLEDISK) { + err = device_create_file(pdev, &dev_attr_whole_disk); + if (err) + goto out_del; + } + + /* everything is up and running, commence */ + rcu_assign_pointer(ptbl->part[partno], p); + + /* suppress uevent if the disk suppresses it */ + if (!dev_get_uevent_suppress(ddev)) + kobject_uevent(&pdev->kobj, KOBJ_ADD); + + hd_ref_init(p); + return p; + +out_free_info: + free_part_info(p); +out_free_stats: + free_part_stats(p); +out_free: + kfree(p); + return ERR_PTR(err); +out_del: + kobject_put(p->holder_dir); + device_del(pdev); +out_put: + put_device(pdev); + blk_free_devt(devt); + return ERR_PTR(err); +} + +static bool disk_unlock_native_capacity(struct gendisk *disk) +{ + const struct block_device_operations *bdops = disk->fops; + + if (bdops->unlock_native_capacity && + !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) { + printk(KERN_CONT "enabling native capacity\n"); + bdops->unlock_native_capacity(disk); + disk->flags |= GENHD_FL_NATIVE_CAPACITY; + return true; + } else { + printk(KERN_CONT "truncated\n"); + return false; + } +} + +static int drop_partitions(struct gendisk *disk, struct block_device *bdev) +{ + struct disk_part_iter piter; + struct hd_struct *part; + int res; + + if (bdev->bd_part_count) + return -EBUSY; + res = invalidate_partition(disk, 0); + if (res) + return res; + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) + delete_partition(disk, part->partno); + disk_part_iter_exit(&piter); + + return 0; +} + +int rescan_partitions(struct gendisk *disk, struct block_device *bdev) +{ + struct parsed_partitions *state = NULL; + struct hd_struct *part; + int p, highest, res; +rescan: + if (state && !IS_ERR(state)) { + free_partitions(state); + state = NULL; + } + + res = drop_partitions(disk, bdev); + if (res) + return res; + + if (disk->fops->revalidate_disk) + disk->fops->revalidate_disk(disk); + check_disk_size_change(disk, bdev); + bdev->bd_invalidated = 0; + if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) + return 0; + if (IS_ERR(state)) { + /* + * I/O error reading the partition table. If any + * partition code tried to read beyond EOD, retry + * after unlocking native capacity. + */ + if (PTR_ERR(state) == -ENOSPC) { + printk(KERN_WARNING "%s: partition table beyond EOD, ", + disk->disk_name); + if (disk_unlock_native_capacity(disk)) + goto rescan; + } + return -EIO; + } + /* + * If any partition code tried to read beyond EOD, try + * unlocking native capacity even if partition table is + * successfully read as we could be missing some partitions. 
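rescan_partitions() above is what the BLKRRPART ioctl ultimately drives through blkdev_reread_part() in block/ioctl.c: invalidate and drop the current partitions, re-read the table, then add back whatever was detected. Requesting a rescan from user space after rewriting a partition table is a single ioctl; the device path is illustrative, CAP_SYS_ADMIN is required, and EBUSY is returned while partitions are held open:

    /* reread_parts.c - sketch: ask the kernel to re-read a disk's
     * partition table via BLKRRPART. */
    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *dev = argc > 1 ? argv[1] : "/dev/sdb";
        int fd = open(dev, O_RDONLY);

        if (fd < 0) {
            perror(dev);
            return 1;
        }
        if (ioctl(fd, BLKRRPART, 0) < 0) {
            perror("BLKRRPART");
            close(fd);
            return 1;
        }
        printf("%s: partition table re-read\n", dev);
        close(fd);
        return 0;
    }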
+ */ + if (state->access_beyond_eod) { + printk(KERN_WARNING + "%s: partition table partially beyond EOD, ", + disk->disk_name); + if (disk_unlock_native_capacity(disk)) + goto rescan; + } + + /* tell userspace that the media / partition table may have changed */ + kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); + + /* Detect the highest partition number and preallocate + * disk->part_tbl. This is an optimization and not strictly + * necessary. + */ + for (p = 1, highest = 0; p < state->limit; p++) + if (state->parts[p].size) + highest = p; + + disk_expand_part_tbl(disk, highest); + + /* add partitions */ + for (p = 1; p < state->limit; p++) { + sector_t size, from; + struct partition_meta_info *info = NULL; + + size = state->parts[p].size; + if (!size) + continue; + + from = state->parts[p].from; + if (from >= get_capacity(disk)) { + printk(KERN_WARNING + "%s: p%d start %llu is beyond EOD, ", + disk->disk_name, p, (unsigned long long) from); + if (disk_unlock_native_capacity(disk)) + goto rescan; + continue; + } + + if (from + size > get_capacity(disk)) { + printk(KERN_WARNING + "%s: p%d size %llu extends beyond EOD, ", + disk->disk_name, p, (unsigned long long) size); + + if (disk_unlock_native_capacity(disk)) { + /* free state and restart */ + goto rescan; + } else { + /* + * we can not ignore partitions of broken tables + * created by for example camera firmware, but + * we limit them to the end of the disk to avoid + * creating invalid block devices + */ + size = get_capacity(disk) - from; + } + } + + if (state->parts[p].has_info) + info = &state->parts[p].info; + part = add_partition(disk, p, from, size, + state->parts[p].flags, + &state->parts[p].info); + if (IS_ERR(part)) { + printk(KERN_ERR " %s: p%d could not be added: %ld\n", + disk->disk_name, p, -PTR_ERR(part)); + continue; + } +#ifdef CONFIG_BLK_DEV_MD + if (state->parts[p].flags & ADDPART_FLAG_RAID) + md_autodetect_dev(part_to_dev(part)->devt); +#endif + } + free_partitions(state); + return 0; +} + +int invalidate_partitions(struct gendisk *disk, struct block_device *bdev) +{ + int res; + + if (!bdev->bd_invalidated) + return 0; + + res = drop_partitions(disk, bdev); + if (res) + return res; + + set_capacity(disk, 0); + check_disk_size_change(disk, bdev); + bdev->bd_invalidated = 0; + /* tell userspace that the media / partition table may have changed */ + kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); + + return 0; +} + +unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) +{ + struct address_space *mapping = bdev->bd_inode->i_mapping; + struct page *page; + + page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), + NULL); + if (!IS_ERR(page)) { + if (PageError(page)) + goto fail; + p->v = page; + return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9); +fail: + page_cache_release(page); + } + p->v = NULL; + return NULL; +} + +EXPORT_SYMBOL(read_dev_sector); diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig new file mode 100644 index 000000000..9b29a996c --- /dev/null +++ b/block/partitions/Kconfig @@ -0,0 +1,269 @@ +# +# Partition configuration +# +config PARTITION_ADVANCED + bool "Advanced partition selection" + help + Say Y here if you would like to use hard disks under Linux which + were partitioned under an operating system running on a different + architecture than your Linux system. 
+ + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about foreign partitioning schemes. + + If unsure, say N. + +config ACORN_PARTITION + bool "Acorn partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + help + Support hard disks partitioned under Acorn operating systems. + +config ACORN_PARTITION_CUMANA + bool "Cumana partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + depends on ACORN_PARTITION + help + Say Y here if you would like to use hard disks under Linux which + were partitioned using the Cumana interface on Acorn machines. + +config ACORN_PARTITION_EESOX + bool "EESOX partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + depends on ACORN_PARTITION + +config ACORN_PARTITION_ICS + bool "ICS partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + depends on ACORN_PARTITION + help + Say Y here if you would like to use hard disks under Linux which + were partitioned using the ICS interface on Acorn machines. + +config ACORN_PARTITION_ADFS + bool "Native filecore partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + depends on ACORN_PARTITION + help + The Acorn Disc Filing System is the standard file system of the + RiscOS operating system which runs on Acorn's ARM-based Risc PC + systems and the Acorn Archimedes range of machines. If you say + `Y' here, Linux will support disk partitions created under ADFS. + +config ACORN_PARTITION_POWERTEC + bool "PowerTec partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + depends on ACORN_PARTITION + help + Support reading partition tables created on Acorn machines using + the PowerTec SCSI drive. + +config ACORN_PARTITION_RISCIX + bool "RISCiX partition support" if PARTITION_ADVANCED + default y if ARCH_ACORN + depends on ACORN_PARTITION + help + Once upon a time, there was a native Unix port for the Acorn series + of machines called RISCiX. If you say 'Y' here, Linux will be able + to read disks partitioned under RISCiX. + +config AIX_PARTITION + bool "AIX basic partition table support" if PARTITION_ADVANCED + help + Say Y here if you would like to be able to read the hard disk + partition table format used by IBM or Motorola PowerPC machines + running AIX. AIX actually uses a Logical Volume Manager, where + "logical volumes" can be spread across one or multiple disks, + but this driver works only for the simple case of partitions which + are contiguous. + Otherwise, say N. + +config OSF_PARTITION + bool "Alpha OSF partition support" if PARTITION_ADVANCED + default y if ALPHA + help + Say Y here if you would like to use hard disks under Linux which + were partitioned on an Alpha machine. + +config AMIGA_PARTITION + bool "Amiga partition table support" if PARTITION_ADVANCED + default y if (AMIGA || AFFS_FS=y) + help + Say Y here if you would like to use hard disks under Linux which + were partitioned under AmigaOS. + +config ATARI_PARTITION + bool "Atari partition table support" if PARTITION_ADVANCED + default y if ATARI + help + Say Y here if you would like to use hard disks under Linux which + were partitioned under the Atari OS. + +config IBM_PARTITION + bool "IBM disk label and partition support" + depends on PARTITION_ADVANCED && S390 + help + Say Y here if you would like to be able to read the hard disk + partition table format used by IBM DASD disks operating under CMS. + Otherwise, say N. 
+ +config MAC_PARTITION + bool "Macintosh partition map support" if PARTITION_ADVANCED + default y if (MAC || PPC_PMAC) + help + Say Y here if you would like to use hard disks under Linux which + were partitioned on a Macintosh. + +config MSDOS_PARTITION + bool "PC BIOS (MSDOS partition tables) support" if PARTITION_ADVANCED + default y + help + Say Y here. + +config BSD_DISKLABEL + bool "BSD disklabel (FreeBSD partition tables) support" + depends on PARTITION_ADVANCED && MSDOS_PARTITION + help + FreeBSD uses its own hard disk partition scheme on your PC. It + requires only one entry in the primary partition table of your disk + and manages it similarly to DOS extended partitions, putting in its + first sector a new partition table in BSD disklabel format. Saying Y + here allows you to read these disklabels and further mount FreeBSD + partitions from within Linux if you have also said Y to "UFS + file system support", above. If you don't know what all this is + about, say N. + +config MINIX_SUBPARTITION + bool "Minix subpartition support" + depends on PARTITION_ADVANCED && MSDOS_PARTITION + help + Minix 2.0.0/2.0.2 subpartition table support for Linux. + Say Y here if you want to mount and use Minix 2.0.0/2.0.2 + subpartitions. + +config SOLARIS_X86_PARTITION + bool "Solaris (x86) partition table support" + depends on PARTITION_ADVANCED && MSDOS_PARTITION + help + Like most systems, Solaris x86 uses its own hard disk partition + table format, incompatible with all others. Saying Y here allows you + to read these partition tables and further mount Solaris x86 + partitions from within Linux if you have also said Y to "UFS + file system support", above. + +config UNIXWARE_DISKLABEL + bool "Unixware slices support" + depends on PARTITION_ADVANCED && MSDOS_PARTITION + ---help--- + Like some systems, UnixWare uses its own slice table inside a + partition (VTOC - Virtual Table of Contents). Its format is + incompatible with all other OSes. Saying Y here allows you to read + VTOC and further mount UnixWare partitions read-only from within + Linux if you have also said Y to "UFS file system support" or + "System V and Coherent file system support", above. + + This is mainly used to carry data from a UnixWare box to your + Linux box via a removable medium like magneto-optical, ZIP or + removable IDE drives. Note, however, that a good portable way to + transport files and directories between unixes (and even other + operating systems) is given by the tar program ("man tar" or + preferably "info tar"). + + If you don't know what all this is about, say N. + +config LDM_PARTITION + bool "Windows Logical Disk Manager (Dynamic Disk) support" + depends on PARTITION_ADVANCED + ---help--- + Say Y here if you would like to use hard disks under Linux which + were partitioned using Windows 2000's/XP's or Vista's Logical Disk + Manager. They are also known as "Dynamic Disks". + + Note this driver only supports Dynamic Disks with a protective MBR + label, i.e. DOS partition table. It does not support GPT labelled + Dynamic Disks yet as can be created with Vista. + + Windows 2000 introduced the concept of Dynamic Disks to get around + the limitations of the PC's partitioning scheme. The Logical Disk + Manager allows the user to repartition a disk and create spanned, + mirrored, striped or RAID volumes, all without the need for + rebooting. + + Normal partitions are now called Basic Disks under Windows 2000, XP, + and Vista. + + For a fuller description read . + + If unsure, say N. 
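The MSDOS_PARTITION support described above parses the classic PC layout: a 512-byte MBR carrying the 0x55AA signature at offset 510 and four 16-byte primary entries starting at offset 446, each with a type byte and little-endian start LBA and sector count (BSD disklabels and Minix/Solaris slices then nest inside such a primary entry). A stand-alone sketch that dumps those four entries from a disk or image named on the command line; the offsets come from the traditional MBR format, not from this patch:

    /* mbr_dump.c - sketch: print the four primary MBR partition entries. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static uint32_t le32(const unsigned char *p)
    {
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(int argc, char **argv)
    {
        unsigned char mbr[512];
        int fd, i;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <disk-or-image>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || read(fd, mbr, sizeof(mbr)) != sizeof(mbr)) {
            perror(argv[1]);
            return 1;
        }
        if (mbr[510] != 0x55 || mbr[511] != 0xaa) {
            fprintf(stderr, "no MBR signature\n");
            return 1;
        }
        for (i = 0; i < 4; i++) {
            const unsigned char *e = mbr + 446 + 16 * i;

            if (!e[4])              /* type 0x00 marks an unused slot */
                continue;
            printf("p%d: type 0x%02x start %u sectors %u\n",
                   i + 1, e[4], le32(e + 8), le32(e + 12));
        }
        close(fd);
        return 0;
    }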
+ +config LDM_DEBUG + bool "Windows LDM extra logging" + depends on LDM_PARTITION + help + Say Y here if you would like LDM to log verbosely. This could be + helpful if the driver doesn't work as expected and you'd like to + report a bug. + + If unsure, say N. + +config SGI_PARTITION + bool "SGI partition support" if PARTITION_ADVANCED + default y if DEFAULT_SGI_PARTITION + help + Say Y here if you would like to be able to read the hard disk + partition table format used by SGI machines. + +config ULTRIX_PARTITION + bool "Ultrix partition table support" if PARTITION_ADVANCED + default y if MACH_DECSTATION + help + Say Y here if you would like to be able to read the hard disk + partition table format used by DEC (now Compaq) Ultrix machines. + Otherwise, say N. + +config SUN_PARTITION + bool "Sun partition tables support" if PARTITION_ADVANCED + default y if (SPARC || SUN3 || SUN3X) + ---help--- + Like most systems, SunOS uses its own hard disk partition table + format, incompatible with all others. Saying Y here allows you to + read these partition tables and further mount SunOS partitions from + within Linux if you have also said Y to "UFS file system support", + above. This is mainly used to carry data from a SPARC under SunOS to + your Linux box via a removable medium like magneto-optical or ZIP + drives; note however that a good portable way to transport files and + directories between unixes (and even other operating systems) is + given by the tar program ("man tar" or preferably "info tar"). If + you don't know what all this is about, say N. + +config KARMA_PARTITION + bool "Karma Partition support" + depends on PARTITION_ADVANCED + help + Say Y here if you would like to mount the Rio Karma MP3 player, as it + uses a proprietary partition table. + +config EFI_PARTITION + bool "EFI GUID Partition support" if PARTITION_ADVANCED + default y + select CRC32 + help + Say Y here if you would like to use hard disks under Linux which + were partitioned using EFI GPT. + +config SYSV68_PARTITION + bool "SYSV68 partition table support" if PARTITION_ADVANCED + default y if VME + help + Say Y here if you would like to be able to read the hard disk + partition table format used by Motorola Delta machines (using + sysv68). + Otherwise, say N. + +config CMDLINE_PARTITION + bool "Command line partition support" if PARTITION_ADVANCED + select BLK_CMDLINE_PARSER + help + Say Y here if you want to read the partition table from bootargs. + The format for the command line is just like mtdparts. diff --git a/block/partitions/Makefile b/block/partitions/Makefile new file mode 100644 index 000000000..37a952705 --- /dev/null +++ b/block/partitions/Makefile @@ -0,0 +1,22 @@ +# +# Makefile for the linux kernel. 
+# + +obj-$(CONFIG_BLOCK) := check.o + +obj-$(CONFIG_ACORN_PARTITION) += acorn.o +obj-$(CONFIG_AMIGA_PARTITION) += amiga.o +obj-$(CONFIG_ATARI_PARTITION) += atari.o +obj-$(CONFIG_AIX_PARTITION) += aix.o +obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o +obj-$(CONFIG_MAC_PARTITION) += mac.o +obj-$(CONFIG_LDM_PARTITION) += ldm.o +obj-$(CONFIG_MSDOS_PARTITION) += msdos.o +obj-$(CONFIG_OSF_PARTITION) += osf.o +obj-$(CONFIG_SGI_PARTITION) += sgi.o +obj-$(CONFIG_SUN_PARTITION) += sun.o +obj-$(CONFIG_ULTRIX_PARTITION) += ultrix.o +obj-$(CONFIG_IBM_PARTITION) += ibm.o +obj-$(CONFIG_EFI_PARTITION) += efi.o +obj-$(CONFIG_KARMA_PARTITION) += karma.o +obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o diff --git a/block/partitions/acorn.c b/block/partitions/acorn.c new file mode 100644 index 000000000..fbeb69737 --- /dev/null +++ b/block/partitions/acorn.c @@ -0,0 +1,556 @@ +/* + * linux/fs/partitions/acorn.c + * + * Copyright (c) 1996-2000 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Scan ADFS partitions on hard disk drives. Unfortunately, there + * isn't a standard for partitioning drives on Acorn machines, so + * every single manufacturer of SCSI and IDE cards created their own + * method. + */ +#include +#include + +#include "check.h" +#include "acorn.h" + +/* + * Partition types. (Oh for reusability) + */ +#define PARTITION_RISCIX_MFM 1 +#define PARTITION_RISCIX_SCSI 2 +#define PARTITION_LINUX 9 + +#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \ + defined(CONFIG_ACORN_PARTITION_ADFS) +static struct adfs_discrecord * +adfs_partition(struct parsed_partitions *state, char *name, char *data, + unsigned long first_sector, int slot) +{ + struct adfs_discrecord *dr; + unsigned int nr_sects; + + if (adfs_checkbblk(data)) + return NULL; + + dr = (struct adfs_discrecord *)(data + 0x1c0); + + if (dr->disc_size == 0 && dr->disc_size_high == 0) + return NULL; + + nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) | + (le32_to_cpu(dr->disc_size) >> 9); + + if (name) { + strlcat(state->pp_buf, " [", PAGE_SIZE); + strlcat(state->pp_buf, name, PAGE_SIZE); + strlcat(state->pp_buf, "]", PAGE_SIZE); + } + put_partition(state, slot, first_sector, nr_sects); + return dr; +} +#endif + +#ifdef CONFIG_ACORN_PARTITION_RISCIX + +struct riscix_part { + __le32 start; + __le32 length; + __le32 one; + char name[16]; +}; + +struct riscix_record { + __le32 magic; +#define RISCIX_MAGIC cpu_to_le32(0x4a657320) + __le32 date; + struct riscix_part part[8]; +}; + +#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \ + defined(CONFIG_ACORN_PARTITION_ADFS) +static int riscix_partition(struct parsed_partitions *state, + unsigned long first_sect, int slot, + unsigned long nr_sects) +{ + Sector sect; + struct riscix_record *rr; + + rr = read_part_sector(state, first_sect, §); + if (!rr) + return -1; + + strlcat(state->pp_buf, " [RISCiX]", PAGE_SIZE); + + + if (rr->magic == RISCIX_MAGIC) { + unsigned long size = nr_sects > 2 ? 
2 : nr_sects; + int part; + + strlcat(state->pp_buf, " <", PAGE_SIZE); + + put_partition(state, slot++, first_sect, size); + for (part = 0; part < 8; part++) { + if (rr->part[part].one && + memcmp(rr->part[part].name, "All\0", 4)) { + put_partition(state, slot++, + le32_to_cpu(rr->part[part].start), + le32_to_cpu(rr->part[part].length)); + strlcat(state->pp_buf, "(", PAGE_SIZE); + strlcat(state->pp_buf, rr->part[part].name, PAGE_SIZE); + strlcat(state->pp_buf, ")", PAGE_SIZE); + } + } + + strlcat(state->pp_buf, " >\n", PAGE_SIZE); + } else { + put_partition(state, slot++, first_sect, nr_sects); + } + + put_dev_sector(sect); + return slot; +} +#endif +#endif + +#define LINUX_NATIVE_MAGIC 0xdeafa1de +#define LINUX_SWAP_MAGIC 0xdeafab1e + +struct linux_part { + __le32 magic; + __le32 start_sect; + __le32 nr_sects; +}; + +#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \ + defined(CONFIG_ACORN_PARTITION_ADFS) +static int linux_partition(struct parsed_partitions *state, + unsigned long first_sect, int slot, + unsigned long nr_sects) +{ + Sector sect; + struct linux_part *linuxp; + unsigned long size = nr_sects > 2 ? 2 : nr_sects; + + strlcat(state->pp_buf, " [Linux]", PAGE_SIZE); + + put_partition(state, slot++, first_sect, size); + + linuxp = read_part_sector(state, first_sect, §); + if (!linuxp) + return -1; + + strlcat(state->pp_buf, " <", PAGE_SIZE); + while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) || + linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) { + if (slot == state->limit) + break; + put_partition(state, slot++, first_sect + + le32_to_cpu(linuxp->start_sect), + le32_to_cpu(linuxp->nr_sects)); + linuxp ++; + } + strlcat(state->pp_buf, " >", PAGE_SIZE); + + put_dev_sector(sect); + return slot; +} +#endif + +#ifdef CONFIG_ACORN_PARTITION_CUMANA +int adfspart_check_CUMANA(struct parsed_partitions *state) +{ + unsigned long first_sector = 0; + unsigned int start_blk = 0; + Sector sect; + unsigned char *data; + char *name = "CUMANA/ADFS"; + int first = 1; + int slot = 1; + + /* + * Try Cumana style partitions - sector 6 contains ADFS boot block + * with pointer to next 'drive'. + * + * There are unknowns in this code - is the 'cylinder number' of the + * next partition relative to the start of this one - I'm assuming + * it is. + * + * Also, which ID did Cumana use? + * + * This is totally unfinished, and will require more work to get it + * going. Hence it is totally untested. + */ + do { + struct adfs_discrecord *dr; + unsigned int nr_sects; + + data = read_part_sector(state, start_blk * 2 + 6, §); + if (!data) + return -1; + + if (slot == state->limit) + break; + + dr = adfs_partition(state, name, data, first_sector, slot++); + if (!dr) + break; + + name = NULL; + + nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) * + (dr->heads + (dr->lowsector & 0x40 ? 1 : 0)) * + dr->secspertrack; + + if (!nr_sects) + break; + + first = 0; + first_sector += nr_sects; + start_blk += nr_sects >> (BLOCK_SIZE_BITS - 9); + nr_sects = 0; /* hmm - should be partition size */ + + switch (data[0x1fc] & 15) { + case 0: /* No partition / ADFS? */ + break; + +#ifdef CONFIG_ACORN_PARTITION_RISCIX + case PARTITION_RISCIX_SCSI: + /* RISCiX - we don't know how to find the next one. */ + slot = riscix_partition(state, first_sector, slot, + nr_sects); + break; +#endif + + case PARTITION_LINUX: + slot = linux_partition(state, first_sector, slot, + nr_sects); + break; + } + put_dev_sector(sect); + if (slot == -1) + return -1; + } while (1); + put_dev_sector(sect); + return first ? 
0 : 1; +} +#endif + +#ifdef CONFIG_ACORN_PARTITION_ADFS +/* + * Purpose: allocate ADFS partitions. + * + * Params : hd - pointer to gendisk structure to store partition info. + * dev - device number to access. + * + * Returns: -1 on error, 0 for no ADFS boot sector, 1 for ok. + * + * Alloc : hda = whole drive + * hda1 = ADFS partition on first drive. + * hda2 = non-ADFS partition. + */ +int adfspart_check_ADFS(struct parsed_partitions *state) +{ + unsigned long start_sect, nr_sects, sectscyl, heads; + Sector sect; + unsigned char *data; + struct adfs_discrecord *dr; + unsigned char id; + int slot = 1; + + data = read_part_sector(state, 6, §); + if (!data) + return -1; + + dr = adfs_partition(state, "ADFS", data, 0, slot++); + if (!dr) { + put_dev_sector(sect); + return 0; + } + + heads = dr->heads + ((dr->lowsector >> 6) & 1); + sectscyl = dr->secspertrack * heads; + start_sect = ((data[0x1fe] << 8) + data[0x1fd]) * sectscyl; + id = data[0x1fc] & 15; + put_dev_sector(sect); + + /* + * Work out start of non-adfs partition. + */ + nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect; + + if (start_sect) { + switch (id) { +#ifdef CONFIG_ACORN_PARTITION_RISCIX + case PARTITION_RISCIX_SCSI: + case PARTITION_RISCIX_MFM: + slot = riscix_partition(state, start_sect, slot, + nr_sects); + break; +#endif + + case PARTITION_LINUX: + slot = linux_partition(state, start_sect, slot, + nr_sects); + break; + } + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} +#endif + +#ifdef CONFIG_ACORN_PARTITION_ICS + +struct ics_part { + __le32 start; + __le32 size; +}; + +static int adfspart_check_ICSLinux(struct parsed_partitions *state, + unsigned long block) +{ + Sector sect; + unsigned char *data = read_part_sector(state, block, §); + int result = 0; + + if (data) { + if (memcmp(data, "LinuxPart", 9) == 0) + result = 1; + put_dev_sector(sect); + } + + return result; +} + +/* + * Check for a valid ICS partition using the checksum. + */ +static inline int valid_ics_sector(const unsigned char *data) +{ + unsigned long sum; + int i; + + for (i = 0, sum = 0x50617274; i < 508; i++) + sum += data[i]; + + sum -= le32_to_cpu(*(__le32 *)(&data[508])); + + return sum == 0; +} + +/* + * Purpose: allocate ICS partitions. + * Params : hd - pointer to gendisk structure to store partition info. + * dev - device number to access. + * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok. + * Alloc : hda = whole drive + * hda1 = ADFS partition 0 on first drive. + * hda2 = ADFS partition 1 on first drive. + * ..etc.. + */ +int adfspart_check_ICS(struct parsed_partitions *state) +{ + const unsigned char *data; + const struct ics_part *p; + int slot; + Sector sect; + + /* + * Try ICS style partitions - sector 0 contains partition info. + */ + data = read_part_sector(state, 0, §); + if (!data) + return -1; + + if (!valid_ics_sector(data)) { + put_dev_sector(sect); + return 0; + } + + strlcat(state->pp_buf, " [ICS]", PAGE_SIZE); + + for (slot = 1, p = (const struct ics_part *)data; p->size; p++) { + u32 start = le32_to_cpu(p->start); + s32 size = le32_to_cpu(p->size); /* yes, it's signed. */ + + if (slot == state->limit) + break; + + /* + * Negative sizes tell the RISC OS ICS driver to ignore + * this partition - in effect it says that this does not + * contain an ADFS filesystem. + */ + if (size < 0) { + size = -size; + + /* + * Our own extension - We use the first sector + * of the partition to identify what type this + * partition is. We must not make this visible + * to the filesystem. 
+ */ + if (size > 1 && adfspart_check_ICSLinux(state, start)) { + start += 1; + size -= 1; + } + } + + if (size) + put_partition(state, slot++, start, size); + } + + put_dev_sector(sect); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} +#endif + +#ifdef CONFIG_ACORN_PARTITION_POWERTEC +struct ptec_part { + __le32 unused1; + __le32 unused2; + __le32 start; + __le32 size; + __le32 unused5; + char type[8]; +}; + +static inline int valid_ptec_sector(const unsigned char *data) +{ + unsigned char checksum = 0x2a; + int i; + + /* + * If it looks like a PC/BIOS partition, then it + * probably isn't PowerTec. + */ + if (data[510] == 0x55 && data[511] == 0xaa) + return 0; + + for (i = 0; i < 511; i++) + checksum += data[i]; + + return checksum == data[511]; +} + +/* + * Purpose: allocate ICS partitions. + * Params : hd - pointer to gendisk structure to store partition info. + * dev - device number to access. + * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok. + * Alloc : hda = whole drive + * hda1 = ADFS partition 0 on first drive. + * hda2 = ADFS partition 1 on first drive. + * ..etc.. + */ +int adfspart_check_POWERTEC(struct parsed_partitions *state) +{ + Sector sect; + const unsigned char *data; + const struct ptec_part *p; + int slot = 1; + int i; + + data = read_part_sector(state, 0, §); + if (!data) + return -1; + + if (!valid_ptec_sector(data)) { + put_dev_sector(sect); + return 0; + } + + strlcat(state->pp_buf, " [POWERTEC]", PAGE_SIZE); + + for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) { + u32 start = le32_to_cpu(p->start); + u32 size = le32_to_cpu(p->size); + + if (size) + put_partition(state, slot++, start, size); + } + + put_dev_sector(sect); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} +#endif + +#ifdef CONFIG_ACORN_PARTITION_EESOX +struct eesox_part { + char magic[6]; + char name[10]; + __le32 start; + __le32 unused6; + __le32 unused7; + __le32 unused8; +}; + +/* + * Guess who created this format? + */ +static const char eesox_name[] = { + 'N', 'e', 'i', 'l', ' ', + 'C', 'r', 'i', 't', 'c', 'h', 'e', 'l', 'l', ' ', ' ' +}; + +/* + * EESOX SCSI partition format. + * + * This is a goddamned awful partition format. We don't seem to store + * the size of the partition in this table, only the start addresses. + * + * There are two possibilities where the size comes from: + * 1. The individual ADFS boot block entries that are placed on the disk. + * 2. The start address of the next entry. + */ +int adfspart_check_EESOX(struct parsed_partitions *state) +{ + Sector sect; + const unsigned char *data; + unsigned char buffer[256]; + struct eesox_part *p; + sector_t start = 0; + int i, slot = 1; + + data = read_part_sector(state, 7, §); + if (!data) + return -1; + + /* + * "Decrypt" the partition table. God knows why... + */ + for (i = 0; i < 256; i++) + buffer[i] = data[i] ^ eesox_name[i & 15]; + + put_dev_sector(sect); + + for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) { + sector_t next; + + if (memcmp(p->magic, "Eesox", 6)) + break; + + next = le32_to_cpu(p->start); + if (i) + put_partition(state, slot++, start, next - start); + start = next; + } + + if (i != 0) { + sector_t size; + + size = get_capacity(state->bdev->bd_disk); + put_partition(state, slot++, start, size - start); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + } + + return i ? 
1 : 0; +} +#endif diff --git a/block/partitions/acorn.h b/block/partitions/acorn.h new file mode 100644 index 000000000..ede828529 --- /dev/null +++ b/block/partitions/acorn.h @@ -0,0 +1,14 @@ +/* + * linux/fs/partitions/acorn.h + * + * Copyright (C) 1996-2001 Russell King. + * + * I _hate_ this partitioning mess - why can't we have one defined + * format, and everyone stick to it? + */ + +int adfspart_check_CUMANA(struct parsed_partitions *state); +int adfspart_check_ADFS(struct parsed_partitions *state); +int adfspart_check_ICS(struct parsed_partitions *state); +int adfspart_check_POWERTEC(struct parsed_partitions *state); +int adfspart_check_EESOX(struct parsed_partitions *state); diff --git a/block/partitions/aix.c b/block/partitions/aix.c new file mode 100644 index 000000000..f3ed7b2d8 --- /dev/null +++ b/block/partitions/aix.c @@ -0,0 +1,293 @@ +/* + * fs/partitions/aix.c + * + * Copyright (C) 2012-2013 Philippe De Muyter + */ + +#include "check.h" +#include "aix.h" + +struct lvm_rec { + char lvm_id[4]; /* "_LVM" */ + char reserved4[16]; + __be32 lvmarea_len; + __be32 vgda_len; + __be32 vgda_psn[2]; + char reserved36[10]; + __be16 pp_size; /* log2(pp_size) */ + char reserved46[12]; + __be16 version; + }; + +struct vgda { + __be32 secs; + __be32 usec; + char reserved8[16]; + __be16 numlvs; + __be16 maxlvs; + __be16 pp_size; + __be16 numpvs; + __be16 total_vgdas; + __be16 vgda_size; + }; + +struct lvd { + __be16 lv_ix; + __be16 res2; + __be16 res4; + __be16 maxsize; + __be16 lv_state; + __be16 mirror; + __be16 mirror_policy; + __be16 num_lps; + __be16 res10[8]; + }; + +struct lvname { + char name[64]; + }; + +struct ppe { + __be16 lv_ix; + unsigned short res2; + unsigned short res4; + __be16 lp_ix; + unsigned short res8[12]; + }; + +struct pvd { + char reserved0[16]; + __be16 pp_count; + char reserved18[2]; + __be32 psn_part1; + char reserved24[8]; + struct ppe ppe[1016]; + }; + +#define LVM_MAXLVS 256 + +/** + * last_lba(): return number of last logical block of device + * @bdev: block device + * + * Description: Returns last LBA value on success, 0 on error. + * This is stored (by sd and ide-geometry) in + * the part[0] entry for this disk, and is the number of + * physical sectors available on the disk. + */ +static u64 last_lba(struct block_device *bdev) +{ + if (!bdev || !bdev->bd_inode) + return 0; + return (bdev->bd_inode->i_size >> 9) - 1ULL; +} + +/** + * read_lba(): Read bytes from disk, starting at given LBA + * @state + * @lba + * @buffer + * @count + * + * Description: Reads @count bytes from @state->bdev into @buffer. + * Returns number of bytes read on success, 0 on error. + */ +static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer, + size_t count) +{ + size_t totalreadcount = 0; + + if (!buffer || lba + count / 512 > last_lba(state->bdev)) + return 0; + + while (count) { + int copied = 512; + Sector sect; + unsigned char *data = read_part_sector(state, lba++, §); + if (!data) + break; + if (copied > count) + copied = count; + memcpy(buffer, data, copied); + put_dev_sector(sect); + buffer += copied; + totalreadcount += copied; + count -= copied; + } + return totalreadcount; +} + +/** + * alloc_pvd(): reads physical volume descriptor + * @state + * @lba + * + * Description: Returns pvd on success, NULL on error. + * Allocates space for pvd and fill it with disk blocks at @lba + * Notes: remember to free pvd when you're done! 
+ */ +static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba) +{ + size_t count = sizeof(struct pvd); + struct pvd *p; + + p = kmalloc(count, GFP_KERNEL); + if (!p) + return NULL; + + if (read_lba(state, lba, (u8 *) p, count) < count) { + kfree(p); + return NULL; + } + return p; +} + +/** + * alloc_lvn(): reads logical volume names + * @state + * @lba + * + * Description: Returns lvn on success, NULL on error. + * Allocates space for lvn and fill it with disk blocks at @lba + * Notes: remember to free lvn when you're done! + */ +static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba) +{ + size_t count = sizeof(struct lvname) * LVM_MAXLVS; + struct lvname *p; + + p = kmalloc(count, GFP_KERNEL); + if (!p) + return NULL; + + if (read_lba(state, lba, (u8 *) p, count) < count) { + kfree(p); + return NULL; + } + return p; +} + +int aix_partition(struct parsed_partitions *state) +{ + int ret = 0; + Sector sect; + unsigned char *d; + u32 pp_bytes_size; + u32 pp_blocks_size = 0; + u32 vgda_sector = 0; + u32 vgda_len = 0; + int numlvs = 0; + struct pvd *pvd; + struct lv_info { + unsigned short pps_per_lv; + unsigned short pps_found; + unsigned char lv_is_contiguous; + } *lvip; + struct lvname *n = NULL; + + d = read_part_sector(state, 7, §); + if (d) { + struct lvm_rec *p = (struct lvm_rec *)d; + u16 lvm_version = be16_to_cpu(p->version); + char tmp[64]; + + if (lvm_version == 1) { + int pp_size_log2 = be16_to_cpu(p->pp_size); + + pp_bytes_size = 1 << pp_size_log2; + pp_blocks_size = pp_bytes_size / 512; + snprintf(tmp, sizeof(tmp), + " AIX LVM header version %u found\n", + lvm_version); + vgda_len = be32_to_cpu(p->vgda_len); + vgda_sector = be32_to_cpu(p->vgda_psn[0]); + } else { + snprintf(tmp, sizeof(tmp), + " unsupported AIX LVM version %d found\n", + lvm_version); + } + strlcat(state->pp_buf, tmp, PAGE_SIZE); + put_dev_sector(sect); + } + if (vgda_sector && (d = read_part_sector(state, vgda_sector, §))) { + struct vgda *p = (struct vgda *)d; + + numlvs = be16_to_cpu(p->numlvs); + put_dev_sector(sect); + } + lvip = kcalloc(state->limit, sizeof(struct lv_info), GFP_KERNEL); + if (!lvip) + return 0; + if (numlvs && (d = read_part_sector(state, vgda_sector + 1, §))) { + struct lvd *p = (struct lvd *)d; + int i; + + n = alloc_lvn(state, vgda_sector + vgda_len - 33); + if (n) { + int foundlvs = 0; + + for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) { + lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps); + if (lvip[i].pps_per_lv) + foundlvs += 1; + } + } + put_dev_sector(sect); + } + pvd = alloc_pvd(state, vgda_sector + 17); + if (pvd) { + int numpps = be16_to_cpu(pvd->pp_count); + int psn_part1 = be32_to_cpu(pvd->psn_part1); + int i; + int cur_lv_ix = -1; + int next_lp_ix = 1; + int lp_ix; + + for (i = 0; i < numpps; i += 1) { + struct ppe *p = pvd->ppe + i; + unsigned int lv_ix; + + lp_ix = be16_to_cpu(p->lp_ix); + if (!lp_ix) { + next_lp_ix = 1; + continue; + } + lv_ix = be16_to_cpu(p->lv_ix) - 1; + if (lv_ix >= state->limit) { + cur_lv_ix = -1; + continue; + } + lvip[lv_ix].pps_found += 1; + if (lp_ix == 1) { + cur_lv_ix = lv_ix; + next_lp_ix = 1; + } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) { + next_lp_ix = 1; + continue; + } + if (lp_ix == lvip[lv_ix].pps_per_lv) { + char tmp[70]; + + put_partition(state, lv_ix + 1, + (i + 1 - lp_ix) * pp_blocks_size + psn_part1, + lvip[lv_ix].pps_per_lv * pp_blocks_size); + snprintf(tmp, sizeof(tmp), " <%s>\n", + n[lv_ix].name); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + lvip[lv_ix].lv_is_contiguous = 1; + ret 
= 1; + next_lp_ix = 1; + } else + next_lp_ix += 1; + } + for (i = 0; i < state->limit; i += 1) + if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) + pr_warn("partition %s (%u pp's found) is " + "not contiguous\n", + n[i].name, lvip[i].pps_found); + kfree(pvd); + } + kfree(n); + kfree(lvip); + return ret; +} diff --git a/block/partitions/aix.h b/block/partitions/aix.h new file mode 100644 index 000000000..e0c66a987 --- /dev/null +++ b/block/partitions/aix.h @@ -0,0 +1 @@ +extern int aix_partition(struct parsed_partitions *state); diff --git a/block/partitions/amiga.c b/block/partitions/amiga.c new file mode 100644 index 000000000..2b13533d6 --- /dev/null +++ b/block/partitions/amiga.c @@ -0,0 +1,141 @@ +/* + * fs/partitions/amiga.c + * + * Code extracted from drivers/block/genhd.c + * + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + */ + +#define pr_fmt(fmt) fmt + +#include +#include + +#include "check.h" +#include "amiga.h" + +static __inline__ u32 +checksum_block(__be32 *m, int size) +{ + u32 sum = 0; + + while (size--) + sum += be32_to_cpu(*m++); + return sum; +} + +int amiga_partition(struct parsed_partitions *state) +{ + Sector sect; + unsigned char *data; + struct RigidDiskBlock *rdb; + struct PartitionBlock *pb; + int start_sect, nr_sects, blk, part, res = 0; + int blksize = 1; /* Multiplier for disk block size */ + int slot = 1; + char b[BDEVNAME_SIZE]; + + for (blk = 0; ; blk++, put_dev_sector(sect)) { + if (blk == RDB_ALLOCATION_LIMIT) + goto rdb_done; + data = read_part_sector(state, blk, §); + if (!data) { + if (warn_no_part) + pr_err("Dev %s: unable to read RDB block %d\n", + bdevname(state->bdev, b), blk); + res = -1; + goto rdb_done; + } + if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK)) + continue; + + rdb = (struct RigidDiskBlock *)data; + if (checksum_block((__be32 *)data, be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0) + break; + /* Try again with 0xdc..0xdf zeroed, Windows might have + * trashed it. 
+ */ + *(__be32 *)(data+0xdc) = 0; + if (checksum_block((__be32 *)data, + be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) { + pr_err("Trashed word at 0xd0 in block %d ignored in checksum calculation\n", + blk); + break; + } + + pr_err("Dev %s: RDB in block %d has bad checksum\n", + bdevname(state->bdev, b), blk); + } + + /* blksize is blocks per 512 byte standard block */ + blksize = be32_to_cpu( rdb->rdb_BlockBytes ) / 512; + + { + char tmp[7 + 10 + 1 + 1]; + + /* Be more informative */ + snprintf(tmp, sizeof(tmp), " RDSK (%d)", blksize * 512); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + } + blk = be32_to_cpu(rdb->rdb_PartitionList); + put_dev_sector(sect); + for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) { + blk *= blksize; /* Read in terms partition table understands */ + data = read_part_sector(state, blk, §); + if (!data) { + if (warn_no_part) + pr_err("Dev %s: unable to read partition block %d\n", + bdevname(state->bdev, b), blk); + res = -1; + goto rdb_done; + } + pb = (struct PartitionBlock *)data; + blk = be32_to_cpu(pb->pb_Next); + if (pb->pb_ID != cpu_to_be32(IDNAME_PARTITION)) + continue; + if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 ) + continue; + + /* Tell Kernel about it */ + + nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 - + be32_to_cpu(pb->pb_Environment[9])) * + be32_to_cpu(pb->pb_Environment[3]) * + be32_to_cpu(pb->pb_Environment[5]) * + blksize; + if (!nr_sects) + continue; + start_sect = be32_to_cpu(pb->pb_Environment[9]) * + be32_to_cpu(pb->pb_Environment[3]) * + be32_to_cpu(pb->pb_Environment[5]) * + blksize; + put_partition(state,slot++,start_sect,nr_sects); + { + /* Be even more informative to aid mounting */ + char dostype[4]; + char tmp[42]; + + __be32 *dt = (__be32 *)dostype; + *dt = pb->pb_Environment[16]; + if (dostype[3] < ' ') + snprintf(tmp, sizeof(tmp), " (%c%c%c^%c)", + dostype[0], dostype[1], + dostype[2], dostype[3] + '@' ); + else + snprintf(tmp, sizeof(tmp), " (%c%c%c%c)", + dostype[0], dostype[1], + dostype[2], dostype[3]); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + snprintf(tmp, sizeof(tmp), "(res %d spb %d)", + be32_to_cpu(pb->pb_Environment[6]), + be32_to_cpu(pb->pb_Environment[4])); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + } + res = 1; + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + +rdb_done: + return res; +} diff --git a/block/partitions/amiga.h b/block/partitions/amiga.h new file mode 100644 index 000000000..d094585ca --- /dev/null +++ b/block/partitions/amiga.h @@ -0,0 +1,6 @@ +/* + * fs/partitions/amiga.h + */ + +int amiga_partition(struct parsed_partitions *state); + diff --git a/block/partitions/atari.c b/block/partitions/atari.c new file mode 100644 index 000000000..9875b05e8 --- /dev/null +++ b/block/partitions/atari.c @@ -0,0 +1,149 @@ +/* + * fs/partitions/atari.c + * + * Code extracted from drivers/block/genhd.c + * + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + */ + +#include +#include "check.h" +#include "atari.h" + +/* ++guenther: this should be settable by the user ("make config")?. 
+ */ +#define ICD_PARTS + +/* check if a partition entry looks valid -- Atari format is assumed if at + least one of the primary entries is ok this way */ +#define VALID_PARTITION(pi,hdsiz) \ + (((pi)->flg & 1) && \ + isalnum((pi)->id[0]) && isalnum((pi)->id[1]) && isalnum((pi)->id[2]) && \ + be32_to_cpu((pi)->st) <= (hdsiz) && \ + be32_to_cpu((pi)->st) + be32_to_cpu((pi)->siz) <= (hdsiz)) + +static inline int OK_id(char *s) +{ + return memcmp (s, "GEM", 3) == 0 || memcmp (s, "BGM", 3) == 0 || + memcmp (s, "LNX", 3) == 0 || memcmp (s, "SWP", 3) == 0 || + memcmp (s, "RAW", 3) == 0 ; +} + +int atari_partition(struct parsed_partitions *state) +{ + Sector sect; + struct rootsector *rs; + struct partition_info *pi; + u32 extensect; + u32 hd_size; + int slot; +#ifdef ICD_PARTS + int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */ +#endif + + rs = read_part_sector(state, 0, §); + if (!rs) + return -1; + + /* Verify this is an Atari rootsector: */ + hd_size = state->bdev->bd_inode->i_size >> 9; + if (!VALID_PARTITION(&rs->part[0], hd_size) && + !VALID_PARTITION(&rs->part[1], hd_size) && + !VALID_PARTITION(&rs->part[2], hd_size) && + !VALID_PARTITION(&rs->part[3], hd_size)) { + /* + * if there's no valid primary partition, assume that no Atari + * format partition table (there's no reliable magic or the like + * :-() + */ + put_dev_sector(sect); + return 0; + } + + pi = &rs->part[0]; + strlcat(state->pp_buf, " AHDI", PAGE_SIZE); + for (slot = 1; pi < &rs->part[4] && slot < state->limit; slot++, pi++) { + struct rootsector *xrs; + Sector sect2; + ulong partsect; + + if ( !(pi->flg & 1) ) + continue; + /* active partition */ + if (memcmp (pi->id, "XGM", 3) != 0) { + /* we don't care about other id's */ + put_partition (state, slot, be32_to_cpu(pi->st), + be32_to_cpu(pi->siz)); + continue; + } + /* extension partition */ +#ifdef ICD_PARTS + part_fmt = 1; +#endif + strlcat(state->pp_buf, " XGM<", PAGE_SIZE); + partsect = extensect = be32_to_cpu(pi->st); + while (1) { + xrs = read_part_sector(state, partsect, §2); + if (!xrs) { + printk (" block %ld read failed\n", partsect); + put_dev_sector(sect); + return -1; + } + + /* ++roman: sanity check: bit 0 of flg field must be set */ + if (!(xrs->part[0].flg & 1)) { + printk( "\nFirst sub-partition in extended partition is not valid!\n" ); + put_dev_sector(sect2); + break; + } + + put_partition(state, slot, + partsect + be32_to_cpu(xrs->part[0].st), + be32_to_cpu(xrs->part[0].siz)); + + if (!(xrs->part[1].flg & 1)) { + /* end of linked partition list */ + put_dev_sector(sect2); + break; + } + if (memcmp( xrs->part[1].id, "XGM", 3 ) != 0) { + printk("\nID of extended partition is not XGM!\n"); + put_dev_sector(sect2); + break; + } + + partsect = be32_to_cpu(xrs->part[1].st) + extensect; + put_dev_sector(sect2); + if (++slot == state->limit) { + printk( "\nMaximum number of partitions reached!\n" ); + break; + } + } + strlcat(state->pp_buf, " >", PAGE_SIZE); + } +#ifdef ICD_PARTS + if ( part_fmt!=1 ) { /* no extended partitions -> test ICD-format */ + pi = &rs->icdpart[0]; + /* sanity check: no ICD format if first partition invalid */ + if (OK_id(pi->id)) { + strlcat(state->pp_buf, " ICD<", PAGE_SIZE); + for (; pi < &rs->icdpart[8] && slot < state->limit; slot++, pi++) { + /* accept only GEM,BGM,RAW,LNX,SWP partitions */ + if (!((pi->flg & 1) && OK_id(pi->id))) + continue; + part_fmt = 2; + put_partition (state, slot, + be32_to_cpu(pi->st), + be32_to_cpu(pi->siz)); + } + strlcat(state->pp_buf, " >", PAGE_SIZE); + } + } +#endif + put_dev_sector(sect); + + 
strlcat(state->pp_buf, "\n", PAGE_SIZE); + + return 1; +} diff --git a/block/partitions/atari.h b/block/partitions/atari.h new file mode 100644 index 000000000..f2ec43bfe --- /dev/null +++ b/block/partitions/atari.h @@ -0,0 +1,36 @@ +/* + * fs/partitions/atari.h + * Moved by Russell King from: + * + * linux/include/linux/atari_rootsec.h + * definitions for Atari Rootsector layout + * by Andreas Schwab (schwab@ls5.informatik.uni-dortmund.de) + * + * modified for ICD/Supra partitioning scheme restricted to at most 12 + * partitions + * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de) + */ + +#include + +struct partition_info +{ + u8 flg; /* bit 0: active; bit 7: bootable */ + char id[3]; /* "GEM", "BGM", "XGM", or other */ + __be32 st; /* start of partition */ + __be32 siz; /* length of partition */ +}; + +struct rootsector +{ + char unused[0x156]; /* room for boot code */ + struct partition_info icdpart[8]; /* info for ICD-partitions 5..12 */ + char unused2[0xc]; + u32 hd_siz; /* size of disk in blocks */ + struct partition_info part[4]; + u32 bsl_st; /* start of bad sector list */ + u32 bsl_cnt; /* length of bad sector list */ + u16 checksum; /* checksum for bootable disks */ +} __packed; + +int atari_partition(struct parsed_partitions *state); diff --git a/block/partitions/check.c b/block/partitions/check.c new file mode 100644 index 000000000..16118d11d --- /dev/null +++ b/block/partitions/check.c @@ -0,0 +1,197 @@ +/* + * fs/partitions/check.c + * + * Code extracted from drivers/block/genhd.c + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + * + * We now have independent partition support from the + * block drivers, which allows all the partition code to + * be grouped in one location, and it to be mostly self + * contained. + * + * Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl} + */ + +#include +#include +#include +#include + +#include "check.h" + +#include "acorn.h" +#include "amiga.h" +#include "atari.h" +#include "ldm.h" +#include "mac.h" +#include "msdos.h" +#include "osf.h" +#include "sgi.h" +#include "sun.h" +#include "ibm.h" +#include "ultrix.h" +#include "efi.h" +#include "karma.h" +#include "sysv68.h" +#include "cmdline.h" + +int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/ + +static int (*check_part[])(struct parsed_partitions *) = { + /* + * Probe partition formats with tables at disk address 0 + * that also have an ADFS boot block at 0xdc0. + */ +#ifdef CONFIG_ACORN_PARTITION_ICS + adfspart_check_ICS, +#endif +#ifdef CONFIG_ACORN_PARTITION_POWERTEC + adfspart_check_POWERTEC, +#endif +#ifdef CONFIG_ACORN_PARTITION_EESOX + adfspart_check_EESOX, +#endif + + /* + * Now move on to formats that only have partition info at + * disk address 0xdc0. Since these may also have stale + * PC/BIOS partition tables, they need to come before + * the msdos entry. 
+ */ +#ifdef CONFIG_ACORN_PARTITION_CUMANA + adfspart_check_CUMANA, +#endif +#ifdef CONFIG_ACORN_PARTITION_ADFS + adfspart_check_ADFS, +#endif + +#ifdef CONFIG_CMDLINE_PARTITION + cmdline_partition, +#endif +#ifdef CONFIG_EFI_PARTITION + efi_partition, /* this must come before msdos */ +#endif +#ifdef CONFIG_SGI_PARTITION + sgi_partition, +#endif +#ifdef CONFIG_LDM_PARTITION + ldm_partition, /* this must come before msdos */ +#endif +#ifdef CONFIG_MSDOS_PARTITION + msdos_partition, +#endif +#ifdef CONFIG_OSF_PARTITION + osf_partition, +#endif +#ifdef CONFIG_SUN_PARTITION + sun_partition, +#endif +#ifdef CONFIG_AMIGA_PARTITION + amiga_partition, +#endif +#ifdef CONFIG_ATARI_PARTITION + atari_partition, +#endif +#ifdef CONFIG_MAC_PARTITION + mac_partition, +#endif +#ifdef CONFIG_ULTRIX_PARTITION + ultrix_partition, +#endif +#ifdef CONFIG_IBM_PARTITION + ibm_partition, +#endif +#ifdef CONFIG_KARMA_PARTITION + karma_partition, +#endif +#ifdef CONFIG_SYSV68_PARTITION + sysv68_partition, +#endif + NULL +}; + +static struct parsed_partitions *allocate_partitions(struct gendisk *hd) +{ + struct parsed_partitions *state; + int nr; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + nr = disk_max_parts(hd); + state->parts = vzalloc(nr * sizeof(state->parts[0])); + if (!state->parts) { + kfree(state); + return NULL; + } + + state->limit = nr; + + return state; +} + +void free_partitions(struct parsed_partitions *state) +{ + vfree(state->parts); + kfree(state); +} + +struct parsed_partitions * +check_partition(struct gendisk *hd, struct block_device *bdev) +{ + struct parsed_partitions *state; + int i, res, err; + + state = allocate_partitions(hd); + if (!state) + return NULL; + state->pp_buf = (char *)__get_free_page(GFP_KERNEL); + if (!state->pp_buf) { + free_partitions(state); + return NULL; + } + state->pp_buf[0] = '\0'; + + state->bdev = bdev; + disk_name(hd, 0, state->name); + snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name); + if (isdigit(state->name[strlen(state->name)-1])) + sprintf(state->name, "p"); + + i = res = err = 0; + while (!res && check_part[i]) { + memset(state->parts, 0, state->limit * sizeof(state->parts[0])); + res = check_part[i++](state); + if (res < 0) { + /* We have hit an I/O error which we don't report now. + * But record it, and let the others do their job. + */ + err = res; + res = 0; + } + + } + if (res > 0) { + printk(KERN_INFO "%s", state->pp_buf); + + free_page((unsigned long)state->pp_buf); + return state; + } + if (state->access_beyond_eod) + err = -ENOSPC; + if (err) + /* The partition is unrecognized. So report I/O errors if there were any */ + res = err; + if (res) { + if (warn_no_part) + strlcat(state->pp_buf, + " unable to read partition table\n", PAGE_SIZE); + printk(KERN_INFO "%s", state->pp_buf); + } + + free_page((unsigned long)state->pp_buf); + free_partitions(state); + return ERR_PTR(res); +} diff --git a/block/partitions/check.h b/block/partitions/check.h new file mode 100644 index 000000000..eade17ea9 --- /dev/null +++ b/block/partitions/check.h @@ -0,0 +1,54 @@ +#include +#include +#include + +/* + * add_gd_partition adds a partitions details to the devices partition + * description. 
+ */ +struct parsed_partitions { + struct block_device *bdev; + char name[BDEVNAME_SIZE]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +void free_partitions(struct parsed_partitions *state); + +struct parsed_partitions * +check_partition(struct gendisk *, struct block_device *); + +static inline void *read_part_sector(struct parsed_partitions *state, + sector_t n, Sector *p) +{ + if (n >= get_capacity(state->bdev->bd_disk)) { + state->access_beyond_eod = true; + return NULL; + } + return read_dev_sector(state->bdev, n, p); +} + +static inline void +put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) +{ + if (n < p->limit) { + char tmp[1 + BDEVNAME_SIZE + 10 + 1]; + + p->parts[n].from = from; + p->parts[n].size = size; + snprintf(tmp, sizeof(tmp), " %s%d", p->name, n); + strlcat(p->pp_buf, tmp, PAGE_SIZE); + } +} + +extern int warn_no_part; + diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c new file mode 100644 index 000000000..5141b563a --- /dev/null +++ b/block/partitions/cmdline.c @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2013 HUAWEI + * Author: Cai Zhiyong + * + * Read block device partition table from the command line. + * Typically used for fixed block (eMMC) embedded devices. + * It has no MBR, so saves storage space. Bootloader can be easily accessed + * by absolute address of data on the block device. + * Users can easily change the partition. + * + * The format for the command line is just like mtdparts. + * + * For further information, see "Documentation/block/cmdline-partition.txt" + * + */ + +#include + +#include "check.h" +#include "cmdline.h" + +static char *cmdline; +static struct cmdline_parts *bdev_parts; + +static int add_part(int slot, struct cmdline_subpart *subpart, void *param) +{ + int label_min; + struct partition_meta_info *info; + char tmp[sizeof(info->volname) + 4]; + struct parsed_partitions *state = (struct parsed_partitions *)param; + + if (slot >= state->limit) + return 1; + + put_partition(state, slot, subpart->from >> 9, + subpart->size >> 9); + + info = &state->parts[slot].info; + + label_min = min_t(int, sizeof(info->volname) - 1, + sizeof(subpart->name)); + strncpy(info->volname, subpart->name, label_min); + info->volname[label_min] = '\0'; + + snprintf(tmp, sizeof(tmp), "(%s)", info->volname); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + + state->parts[slot].has_info = true; + + return 0; +} + +static int __init cmdline_parts_setup(char *s) +{ + cmdline = s; + return 1; +} +__setup("blkdevparts=", cmdline_parts_setup); + +/* + * Purpose: allocate cmdline partitions. 
+ * Returns: + * -1 if unable to read the partition table + * 0 if this isn't our partition table + * 1 if successful + */ +int cmdline_partition(struct parsed_partitions *state) +{ + sector_t disk_size; + char bdev[BDEVNAME_SIZE]; + struct cmdline_parts *parts; + + if (cmdline) { + if (bdev_parts) + cmdline_parts_free(&bdev_parts); + + if (cmdline_parts_parse(&bdev_parts, cmdline)) { + cmdline = NULL; + return -1; + } + cmdline = NULL; + } + + if (!bdev_parts) + return 0; + + bdevname(state->bdev, bdev); + parts = cmdline_parts_find(bdev_parts, bdev); + if (!parts) + return 0; + + disk_size = get_capacity(state->bdev->bd_disk) << 9; + + cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state); + + strlcat(state->pp_buf, "\n", PAGE_SIZE); + + return 1; +} diff --git a/block/partitions/cmdline.h b/block/partitions/cmdline.h new file mode 100644 index 000000000..26e0f8da1 --- /dev/null +++ b/block/partitions/cmdline.h @@ -0,0 +1,2 @@ + +int cmdline_partition(struct parsed_partitions *state); diff --git a/block/partitions/efi.c b/block/partitions/efi.c new file mode 100644 index 000000000..26cb624ac --- /dev/null +++ b/block/partitions/efi.c @@ -0,0 +1,737 @@ +/************************************************************ + * EFI GUID Partition Table handling + * + * http://www.uefi.org/specs/ + * http://www.intel.com/technology/efi/ + * + * efi.[ch] by Matt Domsch + * Copyright 2000,2001,2002,2004 Dell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * TODO: + * + * Changelog: + * Mon August 5th, 2013 Davidlohr Bueso + * - detect hybrid MBRs, tighter pMBR checking & cleanups. + * + * Mon Nov 09 2004 Matt Domsch + * - test for valid PMBR and valid PGPT before ever reading + * AGPT, allow override with 'gpt' kernel command line option. + * - check for first/last_usable_lba outside of size of disk + * + * Tue Mar 26 2002 Matt Domsch + * - Ported to 2.5.7-pre1 and 2.5.7-dj2 + * - Applied patch to avoid fault in alternate header handling + * - cleaned up find_valid_gpt + * - On-disk structure and copy in memory is *always* LE now - + * swab fields as needed + * - remove print_gpt_header() + * - only use first max_p partition entries, to keep the kernel minor number + * and partition numbers tied. + * + * Mon Feb 04 2002 Matt Domsch + * - Removed __PRIPTR_PREFIX - not being used + * + * Mon Jan 14 2002 Matt Domsch + * - Ported to 2.5.2-pre11 + library crc32 patch Linus applied + * + * Thu Dec 6 2001 Matt Domsch + * - Added compare_gpts(). + * - moved le_efi_guid_to_cpus() back into this file. GPT is the only + * thing that keeps EFI GUIDs on disk. + * - Changed gpt structure names and members to be simpler and more Linux-like. 
+ * + * Wed Oct 17 2001 Matt Domsch + * - Removed CONFIG_DEVFS_VOLUMES_UUID code entirely per Martin Wilck + * + * Wed Oct 10 2001 Matt Domsch + * - Changed function comments to DocBook style per Andreas Dilger suggestion. + * + * Mon Oct 08 2001 Matt Domsch + * - Change read_lba() to use the page cache per Al Viro's work. + * - print u64s properly on all architectures + * - fixed debug_printk(), now Dprintk() + * + * Mon Oct 01 2001 Matt Domsch + * - Style cleanups + * - made most functions static + * - Endianness addition + * - remove test for second alternate header, as it's not per spec, + * and is unnecessary. There's now a method to read/write the last + * sector of an odd-sized disk from user space. No tools have ever + * been released which used this code, so it's effectively dead. + * - Per Asit Mallick of Intel, added a test for a valid PMBR. + * - Added kernel command line option 'gpt' to override valid PMBR test. + * + * Wed Jun 6 2001 Martin Wilck + * - added devfs volume UUID support (/dev/volumes/uuids) for + * mounting file systems by the partition GUID. + * + * Tue Dec 5 2000 Matt Domsch + * - Moved crc32() to linux/lib, added efi_crc32(). + * + * Thu Nov 30 2000 Matt Domsch + * - Replaced Intel's CRC32 function with an equivalent + * non-license-restricted version. + * + * Wed Oct 25 2000 Matt Domsch + * - Fixed the last_lba() call to return the proper last block + * + * Thu Oct 12 2000 Matt Domsch + * - Thanks to Andries Brouwer for his debugging assistance. + * - Code works, detects all the partitions. + * + ************************************************************/ +#include +#include +#include +#include +#include +#include "check.h" +#include "efi.h" + +/* This allows a kernel command line option 'gpt' to override + * the test for invalid PMBR. Not __initdata because reloading + * the partition tables happens after init too. + */ +static int force_gpt; +static int __init +force_gpt_fn(char *str) +{ + force_gpt = 1; + return 1; +} +__setup("gpt", force_gpt_fn); + + +/** + * efi_crc32() - EFI version of crc32 function + * @buf: buffer to calculate crc32 of + * @len: length of buf + * + * Description: Returns EFI-style CRC32 value for @buf + * + * This function uses the little endian Ethernet polynomial + * but seeds the function with ~0, and xor's with ~0 at the end. + * Note, the EFI Specification, v1.02, has a reference to + * Dr. Dobbs Journal, May 1994 (actually it's in May 1992). + */ +static inline u32 +efi_crc32(const void *buf, unsigned long len) +{ + return (crc32(~0L, buf, len) ^ ~0L); +} + +/** + * last_lba(): return number of last logical block of device + * @bdev: block device + * + * Description: Returns last LBA value on success, 0 on error. + * This is stored (by sd and ide-geometry) in + * the part[0] entry for this disk, and is the number of + * physical sectors available on the disk. 
+ */ +static u64 last_lba(struct block_device *bdev) +{ + if (!bdev || !bdev->bd_inode) + return 0; + return div_u64(bdev->bd_inode->i_size, + bdev_logical_block_size(bdev)) - 1ULL; +} + +static inline int pmbr_part_valid(gpt_mbr_record *part) +{ + if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT) + goto invalid; + + /* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */ + if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA) + goto invalid; + + return GPT_MBR_PROTECTIVE; +invalid: + return 0; +} + +/** + * is_pmbr_valid(): test Protective MBR for validity + * @mbr: pointer to a legacy mbr structure + * @total_sectors: amount of sectors in the device + * + * Description: Checks for a valid protective or hybrid + * master boot record (MBR). The validity of a pMBR depends + * on all of the following properties: + * 1) MSDOS signature is in the last two bytes of the MBR + * 2) One partition of type 0xEE is found + * + * In addition, a hybrid MBR will have up to three additional + * primary partitions, which point to the same space that's + * marked out by up to three GPT partitions. + * + * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or + * GPT_MBR_HYBRID depending on the device layout. + */ +static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors) +{ + uint32_t sz = 0; + int i, part = 0, ret = 0; /* invalid by default */ + + if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE) + goto done; + + for (i = 0; i < 4; i++) { + ret = pmbr_part_valid(&mbr->partition_record[i]); + if (ret == GPT_MBR_PROTECTIVE) { + part = i; + /* + * Ok, we at least know that there's a protective MBR, + * now check if there are other partition types for + * hybrid MBR. + */ + goto check_hybrid; + } + } + + if (ret != GPT_MBR_PROTECTIVE) + goto done; +check_hybrid: + for (i = 0; i < 4; i++) + if ((mbr->partition_record[i].os_type != + EFI_PMBR_OSTYPE_EFI_GPT) && + (mbr->partition_record[i].os_type != 0x00)) + ret = GPT_MBR_HYBRID; + + /* + * Protective MBRs take up the lesser of the whole disk + * or 2 TiB (32bit LBA), ignoring the rest of the disk. + * Some partitioning programs, nonetheless, choose to set + * the size to the maximum 32-bit limitation, disregarding + * the disk size. + * + * Hybrid MBRs do not necessarily comply with this. + * + * Consider a bad value here to be a warning to support dd'ing + * an image from a smaller disk to a larger disk. + */ + if (ret == GPT_MBR_PROTECTIVE) { + sz = le32_to_cpu(mbr->partition_record[part].size_in_lba); + if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF) + pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n", + sz, min_t(uint32_t, + total_sectors - 1, 0xFFFFFFFF)); + } +done: + return ret; +} + +/** + * read_lba(): Read bytes from disk, starting at given LBA + * @state: disk parsed partitions + * @lba: the Logical Block Address of the partition table + * @buffer: destination buffer + * @count: bytes to read + * + * Description: Reads @count bytes from @state->bdev into @buffer. + * Returns number of bytes read on success, 0 on error. 
+ */ +static size_t read_lba(struct parsed_partitions *state, + u64 lba, u8 *buffer, size_t count) +{ + size_t totalreadcount = 0; + struct block_device *bdev = state->bdev; + sector_t n = lba * (bdev_logical_block_size(bdev) / 512); + + if (!buffer || lba > last_lba(bdev)) + return 0; + + while (count) { + int copied = 512; + Sector sect; + unsigned char *data = read_part_sector(state, n++, §); + if (!data) + break; + if (copied > count) + copied = count; + memcpy(buffer, data, copied); + put_dev_sector(sect); + buffer += copied; + totalreadcount +=copied; + count -= copied; + } + return totalreadcount; +} + +/** + * alloc_read_gpt_entries(): reads partition entries from disk + * @state: disk parsed partitions + * @gpt: GPT header + * + * Description: Returns ptes on success, NULL on error. + * Allocates space for PTEs based on information found in @gpt. + * Notes: remember to free pte when you're done! + */ +static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, + gpt_header *gpt) +{ + size_t count; + gpt_entry *pte; + + if (!gpt) + return NULL; + + count = le32_to_cpu(gpt->num_partition_entries) * + le32_to_cpu(gpt->sizeof_partition_entry); + if (!count) + return NULL; + pte = kmalloc(count, GFP_KERNEL); + if (!pte) + return NULL; + + if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), + (u8 *) pte, count) < count) { + kfree(pte); + pte=NULL; + return NULL; + } + return pte; +} + +/** + * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk + * @state: disk parsed partitions + * @lba: the Logical Block Address of the partition table + * + * Description: returns GPT header on success, NULL on error. Allocates + * and fills a GPT header starting at @ from @state->bdev. + * Note: remember to free gpt when finished with it. + */ +static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state, + u64 lba) +{ + gpt_header *gpt; + unsigned ssz = bdev_logical_block_size(state->bdev); + + gpt = kmalloc(ssz, GFP_KERNEL); + if (!gpt) + return NULL; + + if (read_lba(state, lba, (u8 *) gpt, ssz) < ssz) { + kfree(gpt); + gpt=NULL; + return NULL; + } + + return gpt; +} + +/** + * is_gpt_valid() - tests one GPT header and PTEs for validity + * @state: disk parsed partitions + * @lba: logical block address of the GPT header to test + * @gpt: GPT header ptr, filled on return. + * @ptes: PTEs ptr, filled on return. + * + * Description: returns 1 if valid, 0 on error. + * If valid, returns pointers to newly allocated GPT header and PTEs. 
+ */ +static int is_gpt_valid(struct parsed_partitions *state, u64 lba, + gpt_header **gpt, gpt_entry **ptes) +{ + u32 crc, origcrc; + u64 lastlba; + + if (!ptes) + return 0; + if (!(*gpt = alloc_read_gpt_header(state, lba))) + return 0; + + /* Check the GUID Partition Table signature */ + if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) { + pr_debug("GUID Partition Table Header signature is wrong:" + "%lld != %lld\n", + (unsigned long long)le64_to_cpu((*gpt)->signature), + (unsigned long long)GPT_HEADER_SIGNATURE); + goto fail; + } + + /* Check the GUID Partition Table header size is too big */ + if (le32_to_cpu((*gpt)->header_size) > + bdev_logical_block_size(state->bdev)) { + pr_debug("GUID Partition Table Header size is too large: %u > %u\n", + le32_to_cpu((*gpt)->header_size), + bdev_logical_block_size(state->bdev)); + goto fail; + } + + /* Check the GUID Partition Table header size is too small */ + if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) { + pr_debug("GUID Partition Table Header size is too small: %u < %zu\n", + le32_to_cpu((*gpt)->header_size), + sizeof(gpt_header)); + goto fail; + } + + /* Check the GUID Partition Table CRC */ + origcrc = le32_to_cpu((*gpt)->header_crc32); + (*gpt)->header_crc32 = 0; + crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size)); + + if (crc != origcrc) { + pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n", + crc, origcrc); + goto fail; + } + (*gpt)->header_crc32 = cpu_to_le32(origcrc); + + /* Check that the my_lba entry points to the LBA that contains + * the GUID Partition Table */ + if (le64_to_cpu((*gpt)->my_lba) != lba) { + pr_debug("GPT my_lba incorrect: %lld != %lld\n", + (unsigned long long)le64_to_cpu((*gpt)->my_lba), + (unsigned long long)lba); + goto fail; + } + + /* Check the first_usable_lba and last_usable_lba are + * within the disk. 
+ */ + lastlba = last_lba(state->bdev); + if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) { + pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n", + (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba), + (unsigned long long)lastlba); + goto fail; + } + if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) { + pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n", + (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba), + (unsigned long long)lastlba); + goto fail; + } + if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) { + pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n", + (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba), + (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba)); + goto fail; + } + /* Check that sizeof_partition_entry has the correct value */ + if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) { + pr_debug("GUID Partitition Entry Size check failed.\n"); + goto fail; + } + + if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) + goto fail; + + /* Check the GUID Partition Entry Array CRC */ + crc = efi_crc32((const unsigned char *) (*ptes), + le32_to_cpu((*gpt)->num_partition_entries) * + le32_to_cpu((*gpt)->sizeof_partition_entry)); + + if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) { + pr_debug("GUID Partitition Entry Array CRC check failed.\n"); + goto fail_ptes; + } + + /* We're done, all's well */ + return 1; + + fail_ptes: + kfree(*ptes); + *ptes = NULL; + fail: + kfree(*gpt); + *gpt = NULL; + return 0; +} + +/** + * is_pte_valid() - tests one PTE for validity + * @pte:pte to check + * @lastlba: last lba of the disk + * + * Description: returns 1 if valid, 0 on error. + */ +static inline int +is_pte_valid(const gpt_entry *pte, const u64 lastlba) +{ + if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) || + le64_to_cpu(pte->starting_lba) > lastlba || + le64_to_cpu(pte->ending_lba) > lastlba) + return 0; + return 1; +} + +/** + * compare_gpts() - Search disk for valid GPT headers and PTEs + * @pgpt: primary GPT header + * @agpt: alternate GPT header + * @lastlba: last LBA number + * + * Description: Returns nothing. Sanity checks pgpt and agpt fields + * and prints warnings on discrepancies. + * + */ +static void +compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba) +{ + int error_found = 0; + if (!pgpt || !agpt) + return; + if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) { + pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n"); + pr_warn("GPT:%lld != %lld\n", + (unsigned long long)le64_to_cpu(pgpt->my_lba), + (unsigned long long)le64_to_cpu(agpt->alternate_lba)); + error_found++; + } + if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) { + pr_warn("GPT:Primary header alternate_lba != Alt. 
header my_lba\n"); + pr_warn("GPT:%lld != %lld\n", + (unsigned long long)le64_to_cpu(pgpt->alternate_lba), + (unsigned long long)le64_to_cpu(agpt->my_lba)); + error_found++; + } + if (le64_to_cpu(pgpt->first_usable_lba) != + le64_to_cpu(agpt->first_usable_lba)) { + pr_warn("GPT:first_usable_lbas don't match.\n"); + pr_warn("GPT:%lld != %lld\n", + (unsigned long long)le64_to_cpu(pgpt->first_usable_lba), + (unsigned long long)le64_to_cpu(agpt->first_usable_lba)); + error_found++; + } + if (le64_to_cpu(pgpt->last_usable_lba) != + le64_to_cpu(agpt->last_usable_lba)) { + pr_warn("GPT:last_usable_lbas don't match.\n"); + pr_warn("GPT:%lld != %lld\n", + (unsigned long long)le64_to_cpu(pgpt->last_usable_lba), + (unsigned long long)le64_to_cpu(agpt->last_usable_lba)); + error_found++; + } + if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) { + pr_warn("GPT:disk_guids don't match.\n"); + error_found++; + } + if (le32_to_cpu(pgpt->num_partition_entries) != + le32_to_cpu(agpt->num_partition_entries)) { + pr_warn("GPT:num_partition_entries don't match: " + "0x%x != 0x%x\n", + le32_to_cpu(pgpt->num_partition_entries), + le32_to_cpu(agpt->num_partition_entries)); + error_found++; + } + if (le32_to_cpu(pgpt->sizeof_partition_entry) != + le32_to_cpu(agpt->sizeof_partition_entry)) { + pr_warn("GPT:sizeof_partition_entry values don't match: " + "0x%x != 0x%x\n", + le32_to_cpu(pgpt->sizeof_partition_entry), + le32_to_cpu(agpt->sizeof_partition_entry)); + error_found++; + } + if (le32_to_cpu(pgpt->partition_entry_array_crc32) != + le32_to_cpu(agpt->partition_entry_array_crc32)) { + pr_warn("GPT:partition_entry_array_crc32 values don't match: " + "0x%x != 0x%x\n", + le32_to_cpu(pgpt->partition_entry_array_crc32), + le32_to_cpu(agpt->partition_entry_array_crc32)); + error_found++; + } + if (le64_to_cpu(pgpt->alternate_lba) != lastlba) { + pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n"); + pr_warn("GPT:%lld != %lld\n", + (unsigned long long)le64_to_cpu(pgpt->alternate_lba), + (unsigned long long)lastlba); + error_found++; + } + + if (le64_to_cpu(agpt->my_lba) != lastlba) { + pr_warn("GPT:Alternate GPT header not at the end of the disk.\n"); + pr_warn("GPT:%lld != %lld\n", + (unsigned long long)le64_to_cpu(agpt->my_lba), + (unsigned long long)lastlba); + error_found++; + } + + if (error_found) + pr_warn("GPT: Use GNU Parted to correct GPT errors.\n"); + return; +} + +/** + * find_valid_gpt() - Search disk for valid GPT headers and PTEs + * @state: disk parsed partitions + * @gpt: GPT header ptr, filled on return. + * @ptes: PTEs ptr, filled on return. + * + * Description: Returns 1 if valid, 0 on error. + * If valid, returns pointers to newly allocated GPT header and PTEs. + * Validity depends on PMBR being valid (or being overridden by the + * 'gpt' kernel command line option) and finding either the Primary + * GPT header and PTEs valid, or the Alternate GPT header and PTEs + * valid. If the Primary GPT header is not valid, the Alternate GPT header + * is not checked unless the 'gpt' kernel command line option is passed. + * This protects against devices which misreport their size, and forces + * the user to decide to use the Alternate GPT. 
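+ *
+ * As an illustration (using only behaviour visible in this function):
+ * on a disk whose protective MBR has been wiped, the default path
+ * fails the is_pmbr_valid() test and the disk is not treated as GPT
+ * at all.  Booting with the 'gpt' option mentioned above sets
+ * force_gpt, which skips the PMBR test and, should the location
+ * recorded in the primary header not yield a valid backup, retries
+ * the alternate header at last_lba().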
+ */ +static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt, + gpt_entry **ptes) +{ + int good_pgpt = 0, good_agpt = 0, good_pmbr = 0; + gpt_header *pgpt = NULL, *agpt = NULL; + gpt_entry *pptes = NULL, *aptes = NULL; + legacy_mbr *legacymbr; + sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9; + u64 lastlba; + + if (!ptes) + return 0; + + lastlba = last_lba(state->bdev); + if (!force_gpt) { + /* This will be added to the EFI Spec. per Intel after v1.02. */ + legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL); + if (!legacymbr) + goto fail; + + read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr)); + good_pmbr = is_pmbr_valid(legacymbr, total_sectors); + kfree(legacymbr); + + if (!good_pmbr) + goto fail; + + pr_debug("Device has a %s MBR\n", + good_pmbr == GPT_MBR_PROTECTIVE ? + "protective" : "hybrid"); + } + + good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA, + &pgpt, &pptes); + if (good_pgpt) + good_agpt = is_gpt_valid(state, + le64_to_cpu(pgpt->alternate_lba), + &agpt, &aptes); + if (!good_agpt && force_gpt) + good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes); + + /* The obviously unsuccessful case */ + if (!good_pgpt && !good_agpt) + goto fail; + + compare_gpts(pgpt, agpt, lastlba); + + /* The good cases */ + if (good_pgpt) { + *gpt = pgpt; + *ptes = pptes; + kfree(agpt); + kfree(aptes); + if (!good_agpt) + pr_warn("Alternate GPT is invalid, using primary GPT.\n"); + return 1; + } + else if (good_agpt) { + *gpt = agpt; + *ptes = aptes; + kfree(pgpt); + kfree(pptes); + pr_warn("Primary GPT is invalid, using alternate GPT.\n"); + return 1; + } + + fail: + kfree(pgpt); + kfree(agpt); + kfree(pptes); + kfree(aptes); + *gpt = NULL; + *ptes = NULL; + return 0; +} + +/** + * efi_partition(struct parsed_partitions *state) + * @state: disk parsed partitions + * + * Description: called from check.c, if the disk contains GPT + * partitions, sets up partition entries in the kernel. + * + * If the first block on the disk is a legacy MBR, + * it will get handled by msdos_partition(). + * If it's a Protective MBR, we'll handle it here. + * + * We do not create a Linux partition for GPT, but + * only for the actual data partitions. + * Returns: + * -1 if unable to read the partition table + * 0 if this isn't our partition table + * 1 if successful + * + */ +int efi_partition(struct parsed_partitions *state) +{ + gpt_header *gpt = NULL; + gpt_entry *ptes = NULL; + u32 i; + unsigned ssz = bdev_logical_block_size(state->bdev) / 512; + + if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) { + kfree(gpt); + kfree(ptes); + return 0; + } + + pr_debug("GUID Partition Table is valid! Yea!\n"); + + for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { + struct partition_meta_info *info; + unsigned label_count = 0; + unsigned label_max; + u64 start = le64_to_cpu(ptes[i].starting_lba); + u64 size = le64_to_cpu(ptes[i].ending_lba) - + le64_to_cpu(ptes[i].starting_lba) + 1ULL; + + if (!is_pte_valid(&ptes[i], last_lba(state->bdev))) + continue; + + put_partition(state, i+1, start * ssz, size * ssz); + + /* If this is a RAID volume, tell md */ + if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID)) + state->parts[i + 1].flags = ADDPART_FLAG_RAID; + + info = &state->parts[i + 1].info; + efi_guid_to_str(&ptes[i].unique_partition_guid, info->uuid); + + /* Naively convert UTF16-LE to 7 bits. 
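+ *
+ * A small worked example of what the loop below produces: only the
+ * low byte of each UTF-16LE code unit is kept, so the name "Boot"
+ * (0x0042 0x006F 0x006F 0x0074) becomes the ASCII string "Boot",
+ * while a unit outside Latin-1 such as U+0394 keeps only 0x94, which
+ * fails isprint() and is stored as '!'.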
*/ + label_max = min(ARRAY_SIZE(info->volname) - 1, + ARRAY_SIZE(ptes[i].partition_name)); + info->volname[label_max] = 0; + while (label_count < label_max) { + u8 c = ptes[i].partition_name[label_count] & 0xff; + if (c && !isprint(c)) + c = '!'; + info->volname[label_count] = c; + label_count++; + } + state->parts[i + 1].has_info = true; + } + kfree(ptes); + kfree(gpt); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} diff --git a/block/partitions/efi.h b/block/partitions/efi.h new file mode 100644 index 000000000..abd0b1928 --- /dev/null +++ b/block/partitions/efi.h @@ -0,0 +1,133 @@ +/************************************************************ + * EFI GUID Partition Table + * Per Intel EFI Specification v1.02 + * http://developer.intel.com/technology/efi/efi.htm + * + * By Matt Domsch Fri Sep 22 22:15:56 CDT 2000 + * Copyright 2000,2001 Dell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + ************************************************************/ + +#ifndef FS_PART_EFI_H_INCLUDED +#define FS_PART_EFI_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MSDOS_MBR_SIGNATURE 0xaa55 +#define EFI_PMBR_OSTYPE_EFI 0xEF +#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE + +#define GPT_MBR_PROTECTIVE 1 +#define GPT_MBR_HYBRID 2 + +#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL +#define GPT_HEADER_REVISION_V1 0x00010000 +#define GPT_PRIMARY_PARTITION_TABLE_LBA 1 + +#define PARTITION_SYSTEM_GUID \ + EFI_GUID( 0xC12A7328, 0xF81F, 0x11d2, \ + 0xBA, 0x4B, 0x00, 0xA0, 0xC9, 0x3E, 0xC9, 0x3B) +#define LEGACY_MBR_PARTITION_GUID \ + EFI_GUID( 0x024DEE41, 0x33E7, 0x11d3, \ + 0x9D, 0x69, 0x00, 0x08, 0xC7, 0x81, 0xF3, 0x9F) +#define PARTITION_MSFT_RESERVED_GUID \ + EFI_GUID( 0xE3C9E316, 0x0B5C, 0x4DB8, \ + 0x81, 0x7D, 0xF9, 0x2D, 0xF0, 0x02, 0x15, 0xAE) +#define PARTITION_BASIC_DATA_GUID \ + EFI_GUID( 0xEBD0A0A2, 0xB9E5, 0x4433, \ + 0x87, 0xC0, 0x68, 0xB6, 0xB7, 0x26, 0x99, 0xC7) +#define PARTITION_LINUX_RAID_GUID \ + EFI_GUID( 0xa19d880f, 0x05fc, 0x4d3b, \ + 0xa0, 0x06, 0x74, 0x3f, 0x0f, 0x84, 0x91, 0x1e) +#define PARTITION_LINUX_SWAP_GUID \ + EFI_GUID( 0x0657fd6d, 0xa4ab, 0x43c4, \ + 0x84, 0xe5, 0x09, 0x33, 0xc8, 0x4b, 0x4f, 0x4f) +#define PARTITION_LINUX_LVM_GUID \ + EFI_GUID( 0xe6d6d379, 0xf507, 0x44c2, \ + 0xa2, 0x3c, 0x23, 0x8f, 0x2a, 0x3d, 0xf9, 0x28) + +typedef struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; + + /* The rest of the logical block is reserved by UEFI and must be zero. 
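+ * (For reference, the fields above add up to exactly 92 bytes, so the
+ * reserved tail is 420 bytes with 512-byte blocks and 4004 bytes with
+ * 4096-byte blocks.)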
+ * EFI standard handles this by: + * + * uint8_t reserved2[ BlockSize - 92 ]; + */ +} __packed gpt_header; + +typedef struct _gpt_entry_attributes { + u64 required_to_function:1; + u64 reserved:47; + u64 type_guid_specific:16; +} __packed gpt_entry_attributes; + +typedef struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + efi_char16_t partition_name[72 / sizeof (efi_char16_t)]; +} __packed gpt_entry; + +typedef struct _gpt_mbr_record { + u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */ + u8 start_head; /* unused by EFI, pt start in CHS */ + u8 start_sector; /* unused by EFI, pt start in CHS */ + u8 start_track; + u8 os_type; /* EFI and legacy non-EFI OS types */ + u8 end_head; /* unused by EFI, pt end in CHS */ + u8 end_sector; /* unused by EFI, pt end in CHS */ + u8 end_track; /* unused by EFI, pt end in CHS */ + __le32 starting_lba; /* used by EFI - start addr of the on disk pt */ + __le32 size_in_lba; /* used by EFI - size of pt in LBA */ +} __packed gpt_mbr_record; + + +typedef struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __packed legacy_mbr; + +/* Functions */ +extern int efi_partition(struct parsed_partitions *state); + +#endif diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c new file mode 100644 index 000000000..47a61474e --- /dev/null +++ b/block/partitions/ibm.c @@ -0,0 +1,364 @@ +/* + * Author(s)......: Holger Smolinski + * Volker Sameske + * Bugreports.to..: + * Copyright IBM Corp. 1999, 2012 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "ibm.h" + + +union label_t { + struct vtoc_volume_label_cdl vol; + struct vtoc_volume_label_ldl lnx; + struct vtoc_cms_label cms; +}; + +/* + * compute the block number from a + * cyl-cyl-head-head structure + */ +static sector_t cchh2blk(struct vtoc_cchh *ptr, struct hd_geometry *geo) +{ + sector_t cyl; + __u16 head; + + /* decode cylinder and heads for large volumes */ + cyl = ptr->hh & 0xFFF0; + cyl <<= 12; + cyl |= ptr->cc; + head = ptr->hh & 0x000F; + return cyl * geo->heads * geo->sectors + + head * geo->sectors; +} + +/* + * compute the block number from a + * cyl-cyl-head-head-block structure + */ +static sector_t cchhb2blk(struct vtoc_cchhb *ptr, struct hd_geometry *geo) +{ + sector_t cyl; + __u16 head; + + /* decode cylinder and heads for large volumes */ + cyl = ptr->hh & 0xFFF0; + cyl <<= 12; + cyl |= ptr->cc; + head = ptr->hh & 0x000F; + return cyl * geo->heads * geo->sectors + + head * geo->sectors + + ptr->b; +} + +static int find_label(struct parsed_partitions *state, + dasd_information2_t *info, + struct hd_geometry *geo, + int blocksize, + sector_t *labelsect, + char name[], + char type[], + union label_t *label) +{ + Sector sect; + unsigned char *data; + sector_t testsect[3]; + unsigned char temp[5]; + int found = 0; + int i, testcount; + + /* There a three places where we may find a valid label: + * - on an ECKD disk it's block 2 + * - on an FBA disk it's block 1 + * - on an CMS formatted FBA disk it is sector 1, even if the block size + * is larger than 512 bytes (possible if the DIAG discipline is used) + * If we have a valid info structure, then we know exactly which case we + * have, otherwise we just search through all possebilities. 
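+ *
+ * A worked example of the fallback search (no info structure,
+ * 4096-byte blocks): testsect[] below becomes { 1, 8, 16 }, i.e.
+ * 512-byte sector 1 for a CMS label, sector 8 (block 1) for an FBA
+ * label and sector 16 (block 2) for an ECKD label.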
+ */ + if (info) { + if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) || + (info->cu_type == 0x3880 && info->dev_type == 0x3370)) + testsect[0] = info->label_block; + else + testsect[0] = info->label_block * (blocksize >> 9); + testcount = 1; + } else { + testsect[0] = 1; + testsect[1] = (blocksize >> 9); + testsect[2] = 2 * (blocksize >> 9); + testcount = 3; + } + for (i = 0; i < testcount; ++i) { + data = read_part_sector(state, testsect[i], §); + if (data == NULL) + continue; + memcpy(label, data, sizeof(*label)); + memcpy(temp, data, 4); + temp[4] = 0; + EBCASC(temp, 4); + put_dev_sector(sect); + if (!strcmp(temp, "VOL1") || + !strcmp(temp, "LNX1") || + !strcmp(temp, "CMS1")) { + if (!strcmp(temp, "VOL1")) { + strncpy(type, label->vol.vollbl, 4); + strncpy(name, label->vol.volid, 6); + } else { + strncpy(type, label->lnx.vollbl, 4); + strncpy(name, label->lnx.volid, 6); + } + EBCASC(type, 4); + EBCASC(name, 6); + *labelsect = testsect[i]; + found = 1; + break; + } + } + if (!found) + memset(label, 0, sizeof(*label)); + + return found; +} + +static int find_vol1_partitions(struct parsed_partitions *state, + struct hd_geometry *geo, + int blocksize, + char name[], + union label_t *label) +{ + sector_t blk; + int counter; + char tmp[64]; + Sector sect; + unsigned char *data; + loff_t offset, size; + struct vtoc_format1_label f1; + int secperblk; + + snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + /* + * get start of VTOC from the disk label and then search for format1 + * and format8 labels + */ + secperblk = blocksize >> 9; + blk = cchhb2blk(&label->vol.vtoc, geo) + 1; + counter = 0; + data = read_part_sector(state, blk * secperblk, §); + while (data != NULL) { + memcpy(&f1, data, sizeof(struct vtoc_format1_label)); + put_dev_sector(sect); + /* skip FMT4 / FMT5 / FMT7 labels */ + if (f1.DS1FMTID == _ascebc['4'] + || f1.DS1FMTID == _ascebc['5'] + || f1.DS1FMTID == _ascebc['7'] + || f1.DS1FMTID == _ascebc['9']) { + blk++; + data = read_part_sector(state, blk * secperblk, §); + continue; + } + /* only FMT1 and 8 labels valid at this point */ + if (f1.DS1FMTID != _ascebc['1'] && + f1.DS1FMTID != _ascebc['8']) + break; + /* OK, we got valid partition data */ + offset = cchh2blk(&f1.DS1EXT1.llimit, geo); + size = cchh2blk(&f1.DS1EXT1.ulimit, geo) - + offset + geo->sectors; + offset *= secperblk; + size *= secperblk; + if (counter >= state->limit) + break; + put_partition(state, counter + 1, offset, size); + counter++; + blk++; + data = read_part_sector(state, blk * secperblk, §); + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + + if (!data) + return -1; + + return 1; +} + +static int find_lnx1_partitions(struct parsed_partitions *state, + struct hd_geometry *geo, + int blocksize, + char name[], + union label_t *label, + sector_t labelsect, + loff_t i_size, + dasd_information2_t *info) +{ + loff_t offset, geo_size, size; + char tmp[64]; + int secperblk; + + snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + secperblk = blocksize >> 9; + if (label->lnx.ldl_version == 0xf2) { + size = label->lnx.formatted_blocks * secperblk; + } else { + /* + * Formated w/o large volume support. If the sanity check + * 'size based on geo == size based on i_size' is true, then + * we can safely assume that we know the formatted size of + * the disk, otherwise we need additional information + * that we can only get from a real DASD device. 
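+ *
+ * Rough numbers as an illustration (assuming a 3390-3 style geometry
+ * of 3339 cylinders x 15 heads x 12 blocks per track with 4096-byte
+ * blocks, i.e. secperblk = 8; these values are not taken from this
+ * code): geo_size below works out to 3339 * 15 * 12 * 8 = 4808160
+ * sectors.  If i_size >> 9 reports the same figure the geometry is
+ * trusted; otherwise the DASD info structure is consulted and, for
+ * ECKD devices, the geometry-based size is used when it is smaller.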
+ */ + geo_size = geo->cylinders * geo->heads + * geo->sectors * secperblk; + size = i_size >> 9; + if (size != geo_size) { + if (!info) { + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; + } + if (!strcmp(info->type, "ECKD")) + if (geo_size < size) + size = geo_size; + /* else keep size based on i_size */ + } + } + /* first and only partition starts in the first block after the label */ + offset = labelsect + secperblk; + put_partition(state, 1, offset, size - offset); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} + +static int find_cms1_partitions(struct parsed_partitions *state, + struct hd_geometry *geo, + int blocksize, + char name[], + union label_t *label, + sector_t labelsect) +{ + loff_t offset, size; + char tmp[64]; + int secperblk; + + /* + * VM style CMS1 labeled disk + */ + blocksize = label->cms.block_size; + secperblk = blocksize >> 9; + if (label->cms.disk_offset != 0) { + snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + /* disk is reserved minidisk */ + offset = label->cms.disk_offset * secperblk; + size = (label->cms.block_count - 1) * secperblk; + } else { + snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + /* + * Special case for FBA devices: + * If an FBA device is CMS formatted with blocksize > 512 byte + * and the DIAG discipline is used, then the CMS label is found + * in sector 1 instead of block 1. However, the partition is + * still supposed to start in block 2. + */ + if (labelsect == 1) + offset = 2 * secperblk; + else + offset = labelsect + secperblk; + size = label->cms.block_count * secperblk; + } + + put_partition(state, 1, offset, size-offset); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} + + +/* + * This is the main function, called by check.c + */ +int ibm_partition(struct parsed_partitions *state) +{ + struct block_device *bdev = state->bdev; + int blocksize, res; + loff_t i_size, offset, size; + dasd_information2_t *info; + struct hd_geometry *geo; + char type[5] = {0,}; + char name[7] = {0,}; + sector_t labelsect; + union label_t *label; + + res = 0; + blocksize = bdev_logical_block_size(bdev); + if (blocksize <= 0) + goto out_exit; + i_size = i_size_read(bdev->bd_inode); + if (i_size == 0) + goto out_exit; + info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL); + if (info == NULL) + goto out_exit; + geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL); + if (geo == NULL) + goto out_nogeo; + label = kmalloc(sizeof(union label_t), GFP_KERNEL); + if (label == NULL) + goto out_nolab; + if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) + goto out_freeall; + if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0) { + kfree(info); + info = NULL; + } + + if (find_label(state, info, geo, blocksize, &labelsect, name, type, + label)) { + if (!strncmp(type, "VOL1", 4)) { + res = find_vol1_partitions(state, geo, blocksize, name, + label); + } else if (!strncmp(type, "LNX1", 4)) { + res = find_lnx1_partitions(state, geo, blocksize, name, + label, labelsect, i_size, + info); + } else if (!strncmp(type, "CMS1", 4)) { + res = find_cms1_partitions(state, geo, blocksize, name, + label, labelsect); + } + } else if (info) { + /* + * ugly but needed for backward compatibility: + * If the block device is a DASD (i.e. BIODASDINFO2 works), + * then we claim it in any case, even though it has no valid + * label. If it has the LDL format, then we simply define a + * partition as if it had an LNX1 label. 
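+ *
+ * For example (label_block 2 and 4096-byte blocks are illustrative
+ * values; the arithmetic is the code below): the implied partition
+ * starts at 512-byte sector (2 + 1) * 8 = 24 and runs to the end of
+ * the device, i_size >> 9 sectors.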
+ */ + res = 1; + if (info->format == DASD_FORMAT_LDL) { + strlcat(state->pp_buf, "(nonl)", PAGE_SIZE); + size = i_size >> 9; + offset = (info->label_block + 1) * (blocksize >> 9); + put_partition(state, 1, offset, size-offset); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + } + } else + res = 0; + +out_freeall: + kfree(label); +out_nolab: + kfree(geo); +out_nogeo: + kfree(info); +out_exit: + return res; +} diff --git a/block/partitions/ibm.h b/block/partitions/ibm.h new file mode 100644 index 000000000..08fb0804a --- /dev/null +++ b/block/partitions/ibm.h @@ -0,0 +1 @@ +int ibm_partition(struct parsed_partitions *); diff --git a/block/partitions/karma.c b/block/partitions/karma.c new file mode 100644 index 000000000..9721fa589 --- /dev/null +++ b/block/partitions/karma.c @@ -0,0 +1,58 @@ +/* + * fs/partitions/karma.c + * Rio Karma partition info. + * + * Copyright (C) 2006 Bob Copeland (me@bobcopeland.com) + * based on osf.c + */ + +#include "check.h" +#include "karma.h" +#include + +int karma_partition(struct parsed_partitions *state) +{ + int i; + int slot = 1; + Sector sect; + unsigned char *data; + struct disklabel { + u8 d_reserved[270]; + struct d_partition { + __le32 p_res; + u8 p_fstype; + u8 p_res2[3]; + __le32 p_offset; + __le32 p_size; + } d_partitions[2]; + u8 d_blank[208]; + __le16 d_magic; + } __packed *label; + struct d_partition *p; + + data = read_part_sector(state, 0, §); + if (!data) + return -1; + + label = (struct disklabel *)data; + if (le16_to_cpu(label->d_magic) != KARMA_LABEL_MAGIC) { + put_dev_sector(sect); + return 0; + } + + p = label->d_partitions; + for (i = 0 ; i < 2; i++, p++) { + if (slot == state->limit) + break; + + if (p->p_fstype == 0x4d && le32_to_cpu(p->p_size)) { + put_partition(state, slot, le32_to_cpu(p->p_offset), + le32_to_cpu(p->p_size)); + } + slot++; + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + put_dev_sector(sect); + return 1; +} + diff --git a/block/partitions/karma.h b/block/partitions/karma.h new file mode 100644 index 000000000..c764b2e9d --- /dev/null +++ b/block/partitions/karma.h @@ -0,0 +1,8 @@ +/* + * fs/partitions/karma.h + */ + +#define KARMA_LABEL_MAGIC 0xAB56 + +int karma_partition(struct parsed_partitions *state); + diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c new file mode 100644 index 000000000..e507cfbd0 --- /dev/null +++ b/block/partitions/ldm.c @@ -0,0 +1,1567 @@ +/** + * ldm - Support for Windows Logical Disk Manager (Dynamic Disks) + * + * Copyright (C) 2001,2002 Richard Russon + * Copyright (c) 2001-2012 Anton Altaparmakov + * Copyright (C) 2001,2002 Jakob Kemi + * + * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program (in the main directory of the source in the file COPYING); if + * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, + * Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include "ldm.h" +#include "check.h" +#include "msdos.h" + +/** + * ldm_debug/info/error/crit - Output an error message + * @f: A printf format string containing the message + * @...: Variables to substitute into @f + * + * ldm_debug() writes a DEBUG level message to the syslog but only if the + * driver was compiled with debug enabled. Otherwise, the call turns into a NOP. + */ +#ifndef CONFIG_LDM_DEBUG +#define ldm_debug(...) do {} while (0) +#else +#define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __func__, f, ##a) +#endif + +#define ldm_crit(f, a...) _ldm_printk (KERN_CRIT, __func__, f, ##a) +#define ldm_error(f, a...) _ldm_printk (KERN_ERR, __func__, f, ##a) +#define ldm_info(f, a...) _ldm_printk (KERN_INFO, __func__, f, ##a) + +static __printf(3, 4) +void _ldm_printk(const char *level, const char *function, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start (args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + printk("%s%s(): %pV\n", level, function, &vaf); + + va_end(args); +} + +/** + * ldm_parse_hexbyte - Convert a ASCII hex number to a byte + * @src: Pointer to at least 2 characters to convert. + * + * Convert a two character ASCII hex string to a number. + * + * Return: 0-255 Success, the byte was parsed correctly + * -1 Error, an invalid character was supplied + */ +static int ldm_parse_hexbyte (const u8 *src) +{ + unsigned int x; /* For correct wrapping */ + int h; + + /* high part */ + x = h = hex_to_bin(src[0]); + if (h < 0) + return -1; + + /* low part */ + h = hex_to_bin(src[1]); + if (h < 0) + return -1; + + return (x << 4) + h; +} + +/** + * ldm_parse_guid - Convert GUID from ASCII to binary + * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba + * @dest: Memory block to hold binary GUID (16 bytes) + * + * N.B. The GUID need not be NULL terminated. + * + * Return: 'true' @dest contains binary GUID + * 'false' @dest contents are undefined + */ +static bool ldm_parse_guid (const u8 *src, u8 *dest) +{ + static const int size[] = { 4, 2, 2, 2, 6 }; + int i, j, v; + + if (src[8] != '-' || src[13] != '-' || + src[18] != '-' || src[23] != '-') + return false; + + for (j = 0; j < 5; j++, src++) + for (i = 0; i < size[j]; i++, src+=2, *dest++ = v) + if ((v = ldm_parse_hexbyte (src)) < 0) + return false; + + return true; +} + +/** + * ldm_parse_privhead - Read the LDM Database PRIVHEAD structure + * @data: Raw database PRIVHEAD structure loaded from the device + * @ph: In-memory privhead structure in which to return parsed information + * + * This parses the LDM database PRIVHEAD structure supplied in @data and + * sets up the in-memory privhead structure @ph with the obtained information. + * + * Return: 'true' @ph contains the PRIVHEAD data + * 'false' @ph contents are undefined + */ +static bool ldm_parse_privhead(const u8 *data, struct privhead *ph) +{ + bool is_vista = false; + + BUG_ON(!data || !ph); + if (MAGIC_PRIVHEAD != get_unaligned_be64(data)) { + ldm_error("Cannot find PRIVHEAD structure. LDM database is" + " corrupt. 
Aborting."); + return false; + } + ph->ver_major = get_unaligned_be16(data + 0x000C); + ph->ver_minor = get_unaligned_be16(data + 0x000E); + ph->logical_disk_start = get_unaligned_be64(data + 0x011B); + ph->logical_disk_size = get_unaligned_be64(data + 0x0123); + ph->config_start = get_unaligned_be64(data + 0x012B); + ph->config_size = get_unaligned_be64(data + 0x0133); + /* Version 2.11 is Win2k/XP and version 2.12 is Vista. */ + if (ph->ver_major == 2 && ph->ver_minor == 12) + is_vista = true; + if (!is_vista && (ph->ver_major != 2 || ph->ver_minor != 11)) { + ldm_error("Expected PRIVHEAD version 2.11 or 2.12, got %d.%d." + " Aborting.", ph->ver_major, ph->ver_minor); + return false; + } + ldm_debug("PRIVHEAD version %d.%d (Windows %s).", ph->ver_major, + ph->ver_minor, is_vista ? "Vista" : "2000/XP"); + if (ph->config_size != LDM_DB_SIZE) { /* 1 MiB in sectors. */ + /* Warn the user and continue, carefully. */ + ldm_info("Database is normally %u bytes, it claims to " + "be %llu bytes.", LDM_DB_SIZE, + (unsigned long long)ph->config_size); + } + if ((ph->logical_disk_size == 0) || (ph->logical_disk_start + + ph->logical_disk_size > ph->config_start)) { + ldm_error("PRIVHEAD disk size doesn't match real disk size"); + return false; + } + if (!ldm_parse_guid(data + 0x0030, ph->disk_id)) { + ldm_error("PRIVHEAD contains an invalid GUID."); + return false; + } + ldm_debug("Parsed PRIVHEAD successfully."); + return true; +} + +/** + * ldm_parse_tocblock - Read the LDM Database TOCBLOCK structure + * @data: Raw database TOCBLOCK structure loaded from the device + * @toc: In-memory toc structure in which to return parsed information + * + * This parses the LDM Database TOCBLOCK (table of contents) structure supplied + * in @data and sets up the in-memory tocblock structure @toc with the obtained + * information. + * + * N.B. The *_start and *_size values returned in @toc are not range-checked. 
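+ * (They are checked further down, in ldm_validate_tocblocks(), where
+ * bitmap start + size must not exceed the config_size recorded in the
+ * PRIVHEAD; bitmaps falling outside that window cause the whole
+ * database to be rejected.)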
+ * + * Return: 'true' @toc contains the TOCBLOCK data + * 'false' @toc contents are undefined + */ +static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc) +{ + BUG_ON (!data || !toc); + + if (MAGIC_TOCBLOCK != get_unaligned_be64(data)) { + ldm_crit ("Cannot find TOCBLOCK, database may be corrupt."); + return false; + } + strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name)); + toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0; + toc->bitmap1_start = get_unaligned_be64(data + 0x2E); + toc->bitmap1_size = get_unaligned_be64(data + 0x36); + + if (strncmp (toc->bitmap1_name, TOC_BITMAP1, + sizeof (toc->bitmap1_name)) != 0) { + ldm_crit ("TOCBLOCK's first bitmap is '%s', should be '%s'.", + TOC_BITMAP1, toc->bitmap1_name); + return false; + } + strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name)); + toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0; + toc->bitmap2_start = get_unaligned_be64(data + 0x50); + toc->bitmap2_size = get_unaligned_be64(data + 0x58); + if (strncmp (toc->bitmap2_name, TOC_BITMAP2, + sizeof (toc->bitmap2_name)) != 0) { + ldm_crit ("TOCBLOCK's second bitmap is '%s', should be '%s'.", + TOC_BITMAP2, toc->bitmap2_name); + return false; + } + ldm_debug ("Parsed TOCBLOCK successfully."); + return true; +} + +/** + * ldm_parse_vmdb - Read the LDM Database VMDB structure + * @data: Raw database VMDB structure loaded from the device + * @vm: In-memory vmdb structure in which to return parsed information + * + * This parses the LDM Database VMDB structure supplied in @data and sets up + * the in-memory vmdb structure @vm with the obtained information. + * + * N.B. The *_start, *_size and *_seq values will be range-checked later. + * + * Return: 'true' @vm contains VMDB info + * 'false' @vm contents are undefined + */ +static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) +{ + BUG_ON (!data || !vm); + + if (MAGIC_VMDB != get_unaligned_be32(data)) { + ldm_crit ("Cannot find the VMDB, database may be corrupt."); + return false; + } + + vm->ver_major = get_unaligned_be16(data + 0x12); + vm->ver_minor = get_unaligned_be16(data + 0x14); + if ((vm->ver_major != 4) || (vm->ver_minor != 10)) { + ldm_error ("Expected VMDB version %d.%d, got %d.%d. " + "Aborting.", 4, 10, vm->ver_major, vm->ver_minor); + return false; + } + + vm->vblk_size = get_unaligned_be32(data + 0x08); + if (vm->vblk_size == 0) { + ldm_error ("Illegal VBLK size"); + return false; + } + + vm->vblk_offset = get_unaligned_be32(data + 0x0C); + vm->last_vblk_seq = get_unaligned_be32(data + 0x04); + + ldm_debug ("Parsed VMDB successfully."); + return true; +} + +/** + * ldm_compare_privheads - Compare two privhead objects + * @ph1: First privhead + * @ph2: Second privhead + * + * This compares the two privhead structures @ph1 and @ph2. + * + * Return: 'true' Identical + * 'false' Different + */ +static bool ldm_compare_privheads (const struct privhead *ph1, + const struct privhead *ph2) +{ + BUG_ON (!ph1 || !ph2); + + return ((ph1->ver_major == ph2->ver_major) && + (ph1->ver_minor == ph2->ver_minor) && + (ph1->logical_disk_start == ph2->logical_disk_start) && + (ph1->logical_disk_size == ph2->logical_disk_size) && + (ph1->config_start == ph2->config_start) && + (ph1->config_size == ph2->config_size) && + !memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE)); +} + +/** + * ldm_compare_tocblocks - Compare two tocblock objects + * @toc1: First toc + * @toc2: Second toc + * + * This compares the two tocblock structures @toc1 and @toc2. 
+ * + * Return: 'true' Identical + * 'false' Different + */ +static bool ldm_compare_tocblocks (const struct tocblock *toc1, + const struct tocblock *toc2) +{ + BUG_ON (!toc1 || !toc2); + + return ((toc1->bitmap1_start == toc2->bitmap1_start) && + (toc1->bitmap1_size == toc2->bitmap1_size) && + (toc1->bitmap2_start == toc2->bitmap2_start) && + (toc1->bitmap2_size == toc2->bitmap2_size) && + !strncmp (toc1->bitmap1_name, toc2->bitmap1_name, + sizeof (toc1->bitmap1_name)) && + !strncmp (toc1->bitmap2_name, toc2->bitmap2_name, + sizeof (toc1->bitmap2_name))); +} + +/** + * ldm_validate_privheads - Compare the primary privhead with its backups + * @state: Partition check state including device holding the LDM Database + * @ph1: Memory struct to fill with ph contents + * + * Read and compare all three privheads from disk. + * + * The privheads on disk show the size and location of the main disk area and + * the configuration area (the database). The values are range-checked against + * @hd, which contains the real size of the disk. + * + * Return: 'true' Success + * 'false' Error + */ +static bool ldm_validate_privheads(struct parsed_partitions *state, + struct privhead *ph1) +{ + static const int off[3] = { OFF_PRIV1, OFF_PRIV2, OFF_PRIV3 }; + struct privhead *ph[3] = { ph1 }; + Sector sect; + u8 *data; + bool result = false; + long num_sects; + int i; + + BUG_ON (!state || !ph1); + + ph[1] = kmalloc (sizeof (*ph[1]), GFP_KERNEL); + ph[2] = kmalloc (sizeof (*ph[2]), GFP_KERNEL); + if (!ph[1] || !ph[2]) { + ldm_crit ("Out of memory."); + goto out; + } + + /* off[1 & 2] are relative to ph[0]->config_start */ + ph[0]->config_start = 0; + + /* Read and parse privheads */ + for (i = 0; i < 3; i++) { + data = read_part_sector(state, ph[0]->config_start + off[i], + §); + if (!data) { + ldm_crit ("Disk read failed."); + goto out; + } + result = ldm_parse_privhead (data, ph[i]); + put_dev_sector (sect); + if (!result) { + ldm_error ("Cannot find PRIVHEAD %d.", i+1); /* Log again */ + if (i < 2) + goto out; /* Already logged */ + else + break; /* FIXME ignore for now, 3rd PH can fail on odd-sized disks */ + } + } + + num_sects = state->bdev->bd_inode->i_size >> 9; + + if ((ph[0]->config_start > num_sects) || + ((ph[0]->config_start + ph[0]->config_size) > num_sects)) { + ldm_crit ("Database extends beyond the end of the disk."); + goto out; + } + + if ((ph[0]->logical_disk_start > ph[0]->config_start) || + ((ph[0]->logical_disk_start + ph[0]->logical_disk_size) + > ph[0]->config_start)) { + ldm_crit ("Disk and database overlap."); + goto out; + } + + if (!ldm_compare_privheads (ph[0], ph[1])) { + ldm_crit ("Primary and backup PRIVHEADs don't match."); + goto out; + } + /* FIXME ignore this for now + if (!ldm_compare_privheads (ph[0], ph[2])) { + ldm_crit ("Primary and backup PRIVHEADs don't match."); + goto out; + }*/ + ldm_debug ("Validated PRIVHEADs successfully."); + result = true; +out: + kfree (ph[1]); + kfree (ph[2]); + return result; +} + +/** + * ldm_validate_tocblocks - Validate the table of contents and its backups + * @state: Partition check state including device holding the LDM Database + * @base: Offset, into @state->bdev, of the database + * @ldb: Cache of the database structures + * + * Find and compare the four tables of contents of the LDM Database stored on + * @state->bdev and return the parsed information into @toc1. + * + * The offsets and sizes of the configs are range-checked against a privhead. 
+ * + * Return: 'true' @toc1 contains validated TOCBLOCK info + * 'false' @toc1 contents are undefined + */ +static bool ldm_validate_tocblocks(struct parsed_partitions *state, + unsigned long base, struct ldmdb *ldb) +{ + static const int off[4] = { OFF_TOCB1, OFF_TOCB2, OFF_TOCB3, OFF_TOCB4}; + struct tocblock *tb[4]; + struct privhead *ph; + Sector sect; + u8 *data; + int i, nr_tbs; + bool result = false; + + BUG_ON(!state || !ldb); + ph = &ldb->ph; + tb[0] = &ldb->toc; + tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL); + if (!tb[1]) { + ldm_crit("Out of memory."); + goto err; + } + tb[2] = (struct tocblock*)((u8*)tb[1] + sizeof(*tb[1])); + tb[3] = (struct tocblock*)((u8*)tb[2] + sizeof(*tb[2])); + /* + * Try to read and parse all four TOCBLOCKs. + * + * Windows Vista LDM v2.12 does not always have all four TOCBLOCKs so + * skip any that fail as long as we get at least one valid TOCBLOCK. + */ + for (nr_tbs = i = 0; i < 4; i++) { + data = read_part_sector(state, base + off[i], §); + if (!data) { + ldm_error("Disk read failed for TOCBLOCK %d.", i); + continue; + } + if (ldm_parse_tocblock(data, tb[nr_tbs])) + nr_tbs++; + put_dev_sector(sect); + } + if (!nr_tbs) { + ldm_crit("Failed to find a valid TOCBLOCK."); + goto err; + } + /* Range check the TOCBLOCK against a privhead. */ + if (((tb[0]->bitmap1_start + tb[0]->bitmap1_size) > ph->config_size) || + ((tb[0]->bitmap2_start + tb[0]->bitmap2_size) > + ph->config_size)) { + ldm_crit("The bitmaps are out of range. Giving up."); + goto err; + } + /* Compare all loaded TOCBLOCKs. */ + for (i = 1; i < nr_tbs; i++) { + if (!ldm_compare_tocblocks(tb[0], tb[i])) { + ldm_crit("TOCBLOCKs 0 and %d do not match.", i); + goto err; + } + } + ldm_debug("Validated %d TOCBLOCKs successfully.", nr_tbs); + result = true; +err: + kfree(tb[1]); + return result; +} + +/** + * ldm_validate_vmdb - Read the VMDB and validate it + * @state: Partition check state including device holding the LDM Database + * @base: Offset, into @bdev, of the database + * @ldb: Cache of the database structures + * + * Find the vmdb of the LDM Database stored on @bdev and return the parsed + * information in @ldb. + * + * Return: 'true' @ldb contains validated VBDB info + * 'false' @ldb contents are undefined + */ +static bool ldm_validate_vmdb(struct parsed_partitions *state, + unsigned long base, struct ldmdb *ldb) +{ + Sector sect; + u8 *data; + bool result = false; + struct vmdb *vm; + struct tocblock *toc; + + BUG_ON (!state || !ldb); + + vm = &ldb->vm; + toc = &ldb->toc; + + data = read_part_sector(state, base + OFF_VMDB, §); + if (!data) { + ldm_crit ("Disk read failed."); + return false; + } + + if (!ldm_parse_vmdb (data, vm)) + goto out; /* Already logged */ + + /* Are there uncommitted transactions? */ + if (get_unaligned_be16(data + 0x10) != 0x01) { + ldm_crit ("Database is not in a consistent state. Aborting."); + goto out; + } + + if (vm->vblk_offset != 512) + ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset); + + /* + * The last_vblkd_seq can be before the end of the vmdb, just make sure + * it is not out of bounds. + */ + if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) { + ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK. " + "Database is corrupt. 
Aborting."); + goto out; + } + + result = true; +out: + put_dev_sector (sect); + return result; +} + + +/** + * ldm_validate_partition_table - Determine whether bdev might be a dynamic disk + * @state: Partition check state including device holding the LDM Database + * + * This function provides a weak test to decide whether the device is a dynamic + * disk or not. It looks for an MS-DOS-style partition table containing at + * least one partition of type 0x42 (formerly SFS, now used by Windows for + * dynamic disks). + * + * N.B. The only possible error can come from the read_part_sector and that is + * only likely to happen if the underlying device is strange. If that IS + * the case we should return zero to let someone else try. + * + * Return: 'true' @state->bdev is a dynamic disk + * 'false' @state->bdev is not a dynamic disk, or an error occurred + */ +static bool ldm_validate_partition_table(struct parsed_partitions *state) +{ + Sector sect; + u8 *data; + struct partition *p; + int i; + bool result = false; + + BUG_ON(!state); + + data = read_part_sector(state, 0, §); + if (!data) { + ldm_info ("Disk read failed."); + return false; + } + + if (*(__le16*) (data + 0x01FE) != cpu_to_le16 (MSDOS_LABEL_MAGIC)) + goto out; + + p = (struct partition*)(data + 0x01BE); + for (i = 0; i < 4; i++, p++) + if (SYS_IND (p) == LDM_PARTITION) { + result = true; + break; + } + + if (result) + ldm_debug ("Found W2K dynamic disk partition type."); + +out: + put_dev_sector (sect); + return result; +} + +/** + * ldm_get_disk_objid - Search a linked list of vblk's for a given Disk Id + * @ldb: Cache of the database structures + * + * The LDM Database contains a list of all partitions on all dynamic disks. + * The primary PRIVHEAD, at the beginning of the physical disk, tells us + * the GUID of this disk. This function searches for the GUID in a linked + * list of vblk's. + * + * Return: Pointer, A matching vblk was found + * NULL, No match, or an error + */ +static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb) +{ + struct list_head *item; + + BUG_ON (!ldb); + + list_for_each (item, &ldb->v_disk) { + struct vblk *v = list_entry (item, struct vblk, list); + if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE)) + return v; + } + + return NULL; +} + +/** + * ldm_create_data_partitions - Create data partitions for this device + * @pp: List of the partitions parsed so far + * @ldb: Cache of the database structures + * + * The database contains ALL the partitions for ALL disk groups, so we need to + * filter out this specific disk. Using the disk's object id, we can find all + * the partitions in the database that belong to this disk. + * + * Add each partition in our database, to the parsed_partitions structure. + * + * N.B. This function creates the partitions in the order it finds partition + * objects in the linked list. 
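+ *
+ * Since ldm_ldmdb_add() keeps the v_part list sorted by starting
+ * sector, that order is simply the on-disk order.  As a small example
+ * (the offsets are illustrative; the arithmetic is the code below):
+ * a partition VBLK with start 2048 on a disk whose PRIVHEAD records
+ * logical_disk_start 63 is registered at device sector
+ * 63 + 2048 = 2111, with the size taken verbatim from the VBLK.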
+ * + * Return: 'true' Partition created + * 'false' Error, probably a range checking problem + */ +static bool ldm_create_data_partitions (struct parsed_partitions *pp, + const struct ldmdb *ldb) +{ + struct list_head *item; + struct vblk *vb; + struct vblk *disk; + struct vblk_part *part; + int part_num = 1; + + BUG_ON (!pp || !ldb); + + disk = ldm_get_disk_objid (ldb); + if (!disk) { + ldm_crit ("Can't find the ID of this disk in the database."); + return false; + } + + strlcat(pp->pp_buf, " [LDM]", PAGE_SIZE); + + /* Create the data partitions */ + list_for_each (item, &ldb->v_part) { + vb = list_entry (item, struct vblk, list); + part = &vb->vblk.part; + + if (part->disk_id != disk->obj_id) + continue; + + put_partition (pp, part_num, ldb->ph.logical_disk_start + + part->start, part->size); + part_num++; + } + + strlcat(pp->pp_buf, "\n", PAGE_SIZE); + return true; +} + + +/** + * ldm_relative - Calculate the next relative offset + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @base: Size of the previous fixed width fields + * @offset: Cumulative size of the previous variable-width fields + * + * Because many of the VBLK fields are variable-width, it's necessary + * to calculate each offset based on the previous one and the length + * of the field it pointed to. + * + * Return: -1 Error, the calculated offset exceeded the size of the buffer + * n OK, a range-checked offset into buffer + */ +static int ldm_relative(const u8 *buffer, int buflen, int base, int offset) +{ + + base += offset; + if (!buffer || offset < 0 || base > buflen) { + if (!buffer) + ldm_error("!buffer"); + if (offset < 0) + ldm_error("offset (%d) < 0", offset); + if (base > buflen) + ldm_error("base (%d) > buflen (%d)", base, buflen); + return -1; + } + if (base + buffer[base] >= buflen) { + ldm_error("base (%d) + buffer[base] (%d) >= buflen (%d)", base, + buffer[base], buflen); + return -1; + } + return buffer[base] + offset + 1; +} + +/** + * ldm_get_vnum - Convert a variable-width, big endian number, into cpu order + * @block: Pointer to the variable-width number to convert + * + * Large numbers in the LDM Database are often stored in a packed format. Each + * number is prefixed by a one byte width marker. All numbers in the database + * are stored in big-endian byte order. This function reads one of these + * numbers and returns the result + * + * N.B. This function DOES NOT perform any range checking, though the most + * it will read is eight bytes. + * + * Return: n A number + * 0 Zero, or an error occurred + */ +static u64 ldm_get_vnum (const u8 *block) +{ + u64 tmp = 0; + u8 length; + + BUG_ON (!block); + + length = *block++; + + if (length && length <= 8) + while (length--) + tmp = (tmp << 8) | *block++; + else + ldm_error ("Illegal length %d.", length); + + return tmp; +} + +/** + * ldm_get_vstr - Read a length-prefixed string into a buffer + * @block: Pointer to the length marker + * @buffer: Location to copy string to + * @buflen: Size of the output buffer + * + * Many of the strings in the LDM Database are not NULL terminated. Instead + * they are prefixed by a one byte length marker. This function copies one of + * these strings into a buffer. + * + * N.B. This function DOES NOT perform any range checking on the input. + * If the buffer is too small, the output will be truncated. + * + * Return: 0, Error and @buffer contents are undefined + * n, String length in characters (excluding NULL) + * buflen-1, String was truncated. 
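+ *
+ * Example of the length-prefixed encoding (shared with ldm_get_vnum()):
+ * the bytes 04 44 61 74 61, copied with buflen >= 5, yield the string
+ * "Data" and a return value of 4, while the same prefix scheme makes
+ * the bytes 02 0b cd decode, via ldm_get_vnum(), to the number 0x0bcd.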
+ */ +static int ldm_get_vstr (const u8 *block, u8 *buffer, int buflen) +{ + int length; + + BUG_ON (!block || !buffer); + + length = block[0]; + if (length >= buflen) { + ldm_error ("Truncating string %d -> %d.", length, buflen); + length = buflen - 1; + } + memcpy (buffer, block + 1, length); + buffer[length] = 0; + return length; +} + + +/** + * ldm_parse_cmp3 - Read a raw VBLK Component object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Component object (version 3) into a vblk structure. + * + * Return: 'true' @vb contains a Component VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_cmp3 (const u8 *buffer, int buflen, struct vblk *vb) +{ + int r_objid, r_name, r_vstate, r_child, r_parent, r_stripe, r_cols, len; + struct vblk_comp *comp; + + BUG_ON (!buffer || !vb); + + r_objid = ldm_relative (buffer, buflen, 0x18, 0); + r_name = ldm_relative (buffer, buflen, 0x18, r_objid); + r_vstate = ldm_relative (buffer, buflen, 0x18, r_name); + r_child = ldm_relative (buffer, buflen, 0x1D, r_vstate); + r_parent = ldm_relative (buffer, buflen, 0x2D, r_child); + + if (buffer[0x12] & VBLK_FLAG_COMP_STRIPE) { + r_stripe = ldm_relative (buffer, buflen, 0x2E, r_parent); + r_cols = ldm_relative (buffer, buflen, 0x2E, r_stripe); + len = r_cols; + } else { + r_stripe = 0; + r_cols = 0; + len = r_parent; + } + if (len < 0) + return false; + + len += VBLK_SIZE_CMP3; + if (len != get_unaligned_be32(buffer + 0x14)) + return false; + + comp = &vb->vblk.comp; + ldm_get_vstr (buffer + 0x18 + r_name, comp->state, + sizeof (comp->state)); + comp->type = buffer[0x18 + r_vstate]; + comp->children = ldm_get_vnum (buffer + 0x1D + r_vstate); + comp->parent_id = ldm_get_vnum (buffer + 0x2D + r_child); + comp->chunksize = r_stripe ? ldm_get_vnum (buffer+r_parent+0x2E) : 0; + + return true; +} + +/** + * ldm_parse_dgr3 - Read a raw VBLK Disk Group object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Disk Group object (version 3) into a vblk structure. + * + * Return: 'true' @vb contains a Disk Group VBLK + * 'false' @vb contents are not defined + */ +static int ldm_parse_dgr3 (const u8 *buffer, int buflen, struct vblk *vb) +{ + int r_objid, r_name, r_diskid, r_id1, r_id2, len; + struct vblk_dgrp *dgrp; + + BUG_ON (!buffer || !vb); + + r_objid = ldm_relative (buffer, buflen, 0x18, 0); + r_name = ldm_relative (buffer, buflen, 0x18, r_objid); + r_diskid = ldm_relative (buffer, buflen, 0x18, r_name); + + if (buffer[0x12] & VBLK_FLAG_DGR3_IDS) { + r_id1 = ldm_relative (buffer, buflen, 0x24, r_diskid); + r_id2 = ldm_relative (buffer, buflen, 0x24, r_id1); + len = r_id2; + } else { + r_id1 = 0; + r_id2 = 0; + len = r_diskid; + } + if (len < 0) + return false; + + len += VBLK_SIZE_DGR3; + if (len != get_unaligned_be32(buffer + 0x14)) + return false; + + dgrp = &vb->vblk.dgrp; + ldm_get_vstr (buffer + 0x18 + r_name, dgrp->disk_id, + sizeof (dgrp->disk_id)); + return true; +} + +/** + * ldm_parse_dgr4 - Read a raw VBLK Disk Group object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Disk Group object (version 4) into a vblk structure. 
+ * + * Return: 'true' @vb contains a Disk Group VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb) +{ + char buf[64]; + int r_objid, r_name, r_id1, r_id2, len; + struct vblk_dgrp *dgrp; + + BUG_ON (!buffer || !vb); + + r_objid = ldm_relative (buffer, buflen, 0x18, 0); + r_name = ldm_relative (buffer, buflen, 0x18, r_objid); + + if (buffer[0x12] & VBLK_FLAG_DGR4_IDS) { + r_id1 = ldm_relative (buffer, buflen, 0x44, r_name); + r_id2 = ldm_relative (buffer, buflen, 0x44, r_id1); + len = r_id2; + } else { + r_id1 = 0; + r_id2 = 0; + len = r_name; + } + if (len < 0) + return false; + + len += VBLK_SIZE_DGR4; + if (len != get_unaligned_be32(buffer + 0x14)) + return false; + + dgrp = &vb->vblk.dgrp; + + ldm_get_vstr (buffer + 0x18 + r_objid, buf, sizeof (buf)); + return true; +} + +/** + * ldm_parse_dsk3 - Read a raw VBLK Disk object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Disk object (version 3) into a vblk structure. + * + * Return: 'true' @vb contains a Disk VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb) +{ + int r_objid, r_name, r_diskid, r_altname, len; + struct vblk_disk *disk; + + BUG_ON (!buffer || !vb); + + r_objid = ldm_relative (buffer, buflen, 0x18, 0); + r_name = ldm_relative (buffer, buflen, 0x18, r_objid); + r_diskid = ldm_relative (buffer, buflen, 0x18, r_name); + r_altname = ldm_relative (buffer, buflen, 0x18, r_diskid); + len = r_altname; + if (len < 0) + return false; + + len += VBLK_SIZE_DSK3; + if (len != get_unaligned_be32(buffer + 0x14)) + return false; + + disk = &vb->vblk.disk; + ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name, + sizeof (disk->alt_name)); + if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id)) + return false; + + return true; +} + +/** + * ldm_parse_dsk4 - Read a raw VBLK Disk object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Disk object (version 4) into a vblk structure. + * + * Return: 'true' @vb contains a Disk VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb) +{ + int r_objid, r_name, len; + struct vblk_disk *disk; + + BUG_ON (!buffer || !vb); + + r_objid = ldm_relative (buffer, buflen, 0x18, 0); + r_name = ldm_relative (buffer, buflen, 0x18, r_objid); + len = r_name; + if (len < 0) + return false; + + len += VBLK_SIZE_DSK4; + if (len != get_unaligned_be32(buffer + 0x14)) + return false; + + disk = &vb->vblk.disk; + memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE); + return true; +} + +/** + * ldm_parse_prt3 - Read a raw VBLK Partition object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Partition object (version 3) into a vblk structure. 
+ * + * Return: 'true' @vb contains a Partition VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_prt3(const u8 *buffer, int buflen, struct vblk *vb) +{ + int r_objid, r_name, r_size, r_parent, r_diskid, r_index, len; + struct vblk_part *part; + + BUG_ON(!buffer || !vb); + r_objid = ldm_relative(buffer, buflen, 0x18, 0); + if (r_objid < 0) { + ldm_error("r_objid %d < 0", r_objid); + return false; + } + r_name = ldm_relative(buffer, buflen, 0x18, r_objid); + if (r_name < 0) { + ldm_error("r_name %d < 0", r_name); + return false; + } + r_size = ldm_relative(buffer, buflen, 0x34, r_name); + if (r_size < 0) { + ldm_error("r_size %d < 0", r_size); + return false; + } + r_parent = ldm_relative(buffer, buflen, 0x34, r_size); + if (r_parent < 0) { + ldm_error("r_parent %d < 0", r_parent); + return false; + } + r_diskid = ldm_relative(buffer, buflen, 0x34, r_parent); + if (r_diskid < 0) { + ldm_error("r_diskid %d < 0", r_diskid); + return false; + } + if (buffer[0x12] & VBLK_FLAG_PART_INDEX) { + r_index = ldm_relative(buffer, buflen, 0x34, r_diskid); + if (r_index < 0) { + ldm_error("r_index %d < 0", r_index); + return false; + } + len = r_index; + } else { + r_index = 0; + len = r_diskid; + } + if (len < 0) { + ldm_error("len %d < 0", len); + return false; + } + len += VBLK_SIZE_PRT3; + if (len > get_unaligned_be32(buffer + 0x14)) { + ldm_error("len %d > BE32(buffer + 0x14) %d", len, + get_unaligned_be32(buffer + 0x14)); + return false; + } + part = &vb->vblk.part; + part->start = get_unaligned_be64(buffer + 0x24 + r_name); + part->volume_offset = get_unaligned_be64(buffer + 0x2C + r_name); + part->size = ldm_get_vnum(buffer + 0x34 + r_name); + part->parent_id = ldm_get_vnum(buffer + 0x34 + r_size); + part->disk_id = ldm_get_vnum(buffer + 0x34 + r_parent); + if (vb->flags & VBLK_FLAG_PART_INDEX) + part->partnum = buffer[0x35 + r_diskid]; + else + part->partnum = 0; + return true; +} + +/** + * ldm_parse_vol5 - Read a raw VBLK Volume object into a vblk structure + * @buffer: Block of data being worked on + * @buflen: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK Volume object (version 5) into a vblk structure. 
+ * + * Return: 'true' @vb contains a Volume VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_vol5(const u8 *buffer, int buflen, struct vblk *vb) +{ + int r_objid, r_name, r_vtype, r_disable_drive_letter, r_child, r_size; + int r_id1, r_id2, r_size2, r_drive, len; + struct vblk_volu *volu; + + BUG_ON(!buffer || !vb); + r_objid = ldm_relative(buffer, buflen, 0x18, 0); + if (r_objid < 0) { + ldm_error("r_objid %d < 0", r_objid); + return false; + } + r_name = ldm_relative(buffer, buflen, 0x18, r_objid); + if (r_name < 0) { + ldm_error("r_name %d < 0", r_name); + return false; + } + r_vtype = ldm_relative(buffer, buflen, 0x18, r_name); + if (r_vtype < 0) { + ldm_error("r_vtype %d < 0", r_vtype); + return false; + } + r_disable_drive_letter = ldm_relative(buffer, buflen, 0x18, r_vtype); + if (r_disable_drive_letter < 0) { + ldm_error("r_disable_drive_letter %d < 0", + r_disable_drive_letter); + return false; + } + r_child = ldm_relative(buffer, buflen, 0x2D, r_disable_drive_letter); + if (r_child < 0) { + ldm_error("r_child %d < 0", r_child); + return false; + } + r_size = ldm_relative(buffer, buflen, 0x3D, r_child); + if (r_size < 0) { + ldm_error("r_size %d < 0", r_size); + return false; + } + if (buffer[0x12] & VBLK_FLAG_VOLU_ID1) { + r_id1 = ldm_relative(buffer, buflen, 0x52, r_size); + if (r_id1 < 0) { + ldm_error("r_id1 %d < 0", r_id1); + return false; + } + } else + r_id1 = r_size; + if (buffer[0x12] & VBLK_FLAG_VOLU_ID2) { + r_id2 = ldm_relative(buffer, buflen, 0x52, r_id1); + if (r_id2 < 0) { + ldm_error("r_id2 %d < 0", r_id2); + return false; + } + } else + r_id2 = r_id1; + if (buffer[0x12] & VBLK_FLAG_VOLU_SIZE) { + r_size2 = ldm_relative(buffer, buflen, 0x52, r_id2); + if (r_size2 < 0) { + ldm_error("r_size2 %d < 0", r_size2); + return false; + } + } else + r_size2 = r_id2; + if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) { + r_drive = ldm_relative(buffer, buflen, 0x52, r_size2); + if (r_drive < 0) { + ldm_error("r_drive %d < 0", r_drive); + return false; + } + } else + r_drive = r_size2; + len = r_drive; + if (len < 0) { + ldm_error("len %d < 0", len); + return false; + } + len += VBLK_SIZE_VOL5; + if (len > get_unaligned_be32(buffer + 0x14)) { + ldm_error("len %d > BE32(buffer + 0x14) %d", len, + get_unaligned_be32(buffer + 0x14)); + return false; + } + volu = &vb->vblk.volu; + ldm_get_vstr(buffer + 0x18 + r_name, volu->volume_type, + sizeof(volu->volume_type)); + memcpy(volu->volume_state, buffer + 0x18 + r_disable_drive_letter, + sizeof(volu->volume_state)); + volu->size = ldm_get_vnum(buffer + 0x3D + r_child); + volu->partition_type = buffer[0x41 + r_size]; + memcpy(volu->guid, buffer + 0x42 + r_size, sizeof(volu->guid)); + if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) { + ldm_get_vstr(buffer + 0x52 + r_size, volu->drive_hint, + sizeof(volu->drive_hint)); + } + return true; +} + +/** + * ldm_parse_vblk - Read a raw VBLK object into a vblk structure + * @buf: Block of data being worked on + * @len: Size of the block of data + * @vb: In-memory vblk in which to return information + * + * Read a raw VBLK object into a vblk structure. This function just reads the + * information common to all VBLK types, then delegates the rest of the work to + * helper functions: ldm_parse_*. 
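+ *
+ * The common part is small: the flags byte sits at offset 0x12 of the
+ * buffer, the type byte at 0x13, the object id is a variable-width
+ * number at 0x18 and the name is the length-prefixed string that
+ * follows it; these are exactly the fields copied into @vb below
+ * before dispatching on vb->type.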
+ * + * Return: 'true' @vb contains a VBLK + * 'false' @vb contents are not defined + */ +static bool ldm_parse_vblk (const u8 *buf, int len, struct vblk *vb) +{ + bool result = false; + int r_objid; + + BUG_ON (!buf || !vb); + + r_objid = ldm_relative (buf, len, 0x18, 0); + if (r_objid < 0) { + ldm_error ("VBLK header is corrupt."); + return false; + } + + vb->flags = buf[0x12]; + vb->type = buf[0x13]; + vb->obj_id = ldm_get_vnum (buf + 0x18); + ldm_get_vstr (buf+0x18+r_objid, vb->name, sizeof (vb->name)); + + switch (vb->type) { + case VBLK_CMP3: result = ldm_parse_cmp3 (buf, len, vb); break; + case VBLK_DSK3: result = ldm_parse_dsk3 (buf, len, vb); break; + case VBLK_DSK4: result = ldm_parse_dsk4 (buf, len, vb); break; + case VBLK_DGR3: result = ldm_parse_dgr3 (buf, len, vb); break; + case VBLK_DGR4: result = ldm_parse_dgr4 (buf, len, vb); break; + case VBLK_PRT3: result = ldm_parse_prt3 (buf, len, vb); break; + case VBLK_VOL5: result = ldm_parse_vol5 (buf, len, vb); break; + } + + if (result) + ldm_debug ("Parsed VBLK 0x%llx (type: 0x%02x) ok.", + (unsigned long long) vb->obj_id, vb->type); + else + ldm_error ("Failed to parse VBLK 0x%llx (type: 0x%02x).", + (unsigned long long) vb->obj_id, vb->type); + + return result; +} + + +/** + * ldm_ldmdb_add - Adds a raw VBLK entry to the ldmdb database + * @data: Raw VBLK to add to the database + * @len: Size of the raw VBLK + * @ldb: Cache of the database structures + * + * The VBLKs are sorted into categories. Partitions are also sorted by offset. + * + * N.B. This function does not check the validity of the VBLKs. + * + * Return: 'true' The VBLK was added + * 'false' An error occurred + */ +static bool ldm_ldmdb_add (u8 *data, int len, struct ldmdb *ldb) +{ + struct vblk *vb; + struct list_head *item; + + BUG_ON (!data || !ldb); + + vb = kmalloc (sizeof (*vb), GFP_KERNEL); + if (!vb) { + ldm_crit ("Out of memory."); + return false; + } + + if (!ldm_parse_vblk (data, len, vb)) { + kfree(vb); + return false; /* Already logged */ + } + + /* Put vblk into the correct list. */ + switch (vb->type) { + case VBLK_DGR3: + case VBLK_DGR4: + list_add (&vb->list, &ldb->v_dgrp); + break; + case VBLK_DSK3: + case VBLK_DSK4: + list_add (&vb->list, &ldb->v_disk); + break; + case VBLK_VOL5: + list_add (&vb->list, &ldb->v_volu); + break; + case VBLK_CMP3: + list_add (&vb->list, &ldb->v_comp); + break; + case VBLK_PRT3: + /* Sort by the partition's start sector. */ + list_for_each (item, &ldb->v_part) { + struct vblk *v = list_entry (item, struct vblk, list); + if ((v->vblk.part.disk_id == vb->vblk.part.disk_id) && + (v->vblk.part.start > vb->vblk.part.start)) { + list_add_tail (&vb->list, &v->list); + return true; + } + } + list_add_tail (&vb->list, &ldb->v_part); + break; + } + return true; +} + +/** + * ldm_frag_add - Add a VBLK fragment to a list + * @data: Raw fragment to be added to the list + * @size: Size of the raw fragment + * @frags: Linked list of VBLK fragments + * + * Fragmented VBLKs may not be consecutive in the database, so they are placed + * in a list so they can be pieced together later. 
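 *
 * [Editorial note, not part of the original patch: ldm_parse_vblk() above
 *  reads only the fields every record shares -- the flag byte at 0x12, the
 *  type byte at 0x13, the object id starting at 0x18 and the name string
 *  right after it -- and then dispatches on the type byte to the per-type
 *  parsers (the VBLK_* values are defined in ldm.h below). ldm_ldmdb_add()
 *  then files each parsed record on one of the five per-type lists;
 *  partition records are the only ones kept sorted, in ascending
 *  start-sector order within each disk id, presumably so the data
 *  partitions come out in on-disk order later.]
 *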
+ * + * Return: 'true' Success, the VBLK was added to the list + * 'false' Error, a problem occurred + */ +static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) +{ + struct frag *f; + struct list_head *item; + int rec, num, group; + + BUG_ON (!data || !frags); + + if (size < 2 * VBLK_SIZE_HEAD) { + ldm_error("Value of size is to small."); + return false; + } + + group = get_unaligned_be32(data + 0x08); + rec = get_unaligned_be16(data + 0x0C); + num = get_unaligned_be16(data + 0x0E); + if ((num < 1) || (num > 4)) { + ldm_error ("A VBLK claims to have %d parts.", num); + return false; + } + if (rec >= num) { + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num); + return false; + } + + list_for_each (item, frags) { + f = list_entry (item, struct frag, list); + if (f->group == group) + goto found; + } + + f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL); + if (!f) { + ldm_crit ("Out of memory."); + return false; + } + + f->group = group; + f->num = num; + f->rec = rec; + f->map = 0xFF << num; + + list_add_tail (&f->list, frags); +found: + if (rec >= f->num) { + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num); + return false; + } + if (f->map & (1 << rec)) { + ldm_error ("Duplicate VBLK, part %d.", rec); + f->map &= 0x7F; /* Mark the group as broken */ + return false; + } + f->map |= (1 << rec); + if (!rec) + memcpy(f->data, data, VBLK_SIZE_HEAD); + data += VBLK_SIZE_HEAD; + size -= VBLK_SIZE_HEAD; + memcpy(f->data + VBLK_SIZE_HEAD + rec * size, data, size); + return true; +} + +/** + * ldm_frag_free - Free a linked list of VBLK fragments + * @list: Linked list of fragments + * + * Free a linked list of VBLK fragments + * + * Return: none + */ +static void ldm_frag_free (struct list_head *list) +{ + struct list_head *item, *tmp; + + BUG_ON (!list); + + list_for_each_safe (item, tmp, list) + kfree (list_entry (item, struct frag, list)); +} + +/** + * ldm_frag_commit - Validate fragmented VBLKs and add them to the database + * @frags: Linked list of VBLK fragments + * @ldb: Cache of the database structures + * + * Now that all the fragmented VBLKs have been collected, they must be added to + * the database for later use. + * + * Return: 'true' All the fragments we added successfully + * 'false' One or more of the fragments we invalid + */ +static bool ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb) +{ + struct frag *f; + struct list_head *item; + + BUG_ON (!frags || !ldb); + + list_for_each (item, frags) { + f = list_entry (item, struct frag, list); + + if (f->map != 0xFF) { + ldm_error ("VBLK group %d is incomplete (0x%02x).", + f->group, f->map); + return false; + } + + if (!ldm_ldmdb_add (f->data, f->num*ldb->vm.vblk_size, ldb)) + return false; /* Already logged */ + } + return true; +} + +/** + * ldm_get_vblks - Read the on-disk database of VBLKs into memory + * @state: Partition check state including device holding the LDM Database + * @base: Offset, into @state->bdev, of the database + * @ldb: Cache of the database structures + * + * To use the information from the VBLKs, they need to be read from the disk, + * unpacked and validated. We cache them in @ldb according to their type. 
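 *
 * [Editorial note, not part of the original patch: the bookkeeping in
 *  ldm_frag_add()/ldm_frag_commit() above is worth a worked example. For a
 *  group split into num = 3 fragments the map starts as 0xFF << 3 = 0xF8;
 *  receiving records 0, 1 and 2 sets bits 0x01, 0x02 and 0x04, so a complete
 *  group reads exactly 0xFF, which is what ldm_frag_commit() tests before
 *  handing the reassembled buffer to ldm_ldmdb_add(). A duplicate record
 *  clears bit 7 (f->map &= 0x7F), so a broken group can never reach 0xFF and
 *  is rejected rather than assembled from stale data.]
 *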
+ * + * Return: 'true' All the VBLKs were read successfully + * 'false' An error occurred + */ +static bool ldm_get_vblks(struct parsed_partitions *state, unsigned long base, + struct ldmdb *ldb) +{ + int size, perbuf, skip, finish, s, v, recs; + u8 *data = NULL; + Sector sect; + bool result = false; + LIST_HEAD (frags); + + BUG_ON(!state || !ldb); + + size = ldb->vm.vblk_size; + perbuf = 512 / size; + skip = ldb->vm.vblk_offset >> 9; /* Bytes to sectors */ + finish = (size * ldb->vm.last_vblk_seq) >> 9; + + for (s = skip; s < finish; s++) { /* For each sector */ + data = read_part_sector(state, base + OFF_VMDB + s, §); + if (!data) { + ldm_crit ("Disk read failed."); + goto out; + } + + for (v = 0; v < perbuf; v++, data+=size) { /* For each vblk */ + if (MAGIC_VBLK != get_unaligned_be32(data)) { + ldm_error ("Expected to find a VBLK."); + goto out; + } + + recs = get_unaligned_be16(data + 0x0E); /* Number of records */ + if (recs == 1) { + if (!ldm_ldmdb_add (data, size, ldb)) + goto out; /* Already logged */ + } else if (recs > 1) { + if (!ldm_frag_add (data, size, &frags)) + goto out; /* Already logged */ + } + /* else Record is not in use, ignore it. */ + } + put_dev_sector (sect); + data = NULL; + } + + result = ldm_frag_commit (&frags, ldb); /* Failures, already logged */ +out: + if (data) + put_dev_sector (sect); + ldm_frag_free (&frags); + + return result; +} + +/** + * ldm_free_vblks - Free a linked list of vblk's + * @lh: Head of a linked list of struct vblk + * + * Free a list of vblk's and free the memory used to maintain the list. + * + * Return: none + */ +static void ldm_free_vblks (struct list_head *lh) +{ + struct list_head *item, *tmp; + + BUG_ON (!lh); + + list_for_each_safe (item, tmp, lh) + kfree (list_entry (item, struct vblk, list)); +} + + +/** + * ldm_partition - Find out whether a device is a dynamic disk and handle it + * @state: Partition check state including device holding the LDM Database + * + * This determines whether the device @bdev is a dynamic disk and if so creates + * the partitions necessary in the gendisk structure pointed to by @hd. + * + * We create a dummy device 1, which contains the LDM database, and then create + * each partition described by the LDM database in sequence as devices 2+. For + * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3, + * and so on: the actual data containing partitions. + * + * Return: 1 Success, @state->bdev is a dynamic disk and we handled it + * 0 Success, @state->bdev is not a dynamic disk + * -1 An error occurred before enough information had been read + * Or @state->bdev is a dynamic disk, but it may be corrupted + */ +int ldm_partition(struct parsed_partitions *state) +{ + struct ldmdb *ldb; + unsigned long base; + int result = -1; + + BUG_ON(!state); + + /* Look for signs of a Dynamic Disk */ + if (!ldm_validate_partition_table(state)) + return 0; + + ldb = kmalloc (sizeof (*ldb), GFP_KERNEL); + if (!ldb) { + ldm_crit ("Out of memory."); + goto out; + } + + /* Parse and check privheads. */ + if (!ldm_validate_privheads(state, &ldb->ph)) + goto out; /* Already logged */ + + /* All further references are relative to base (database start). */ + base = ldb->ph.config_start; + + /* Parse and check tocs and vmdb. 
*/ + if (!ldm_validate_tocblocks(state, base, ldb) || + !ldm_validate_vmdb(state, base, ldb)) + goto out; /* Already logged */ + + /* Initialize vblk lists in ldmdb struct */ + INIT_LIST_HEAD (&ldb->v_dgrp); + INIT_LIST_HEAD (&ldb->v_disk); + INIT_LIST_HEAD (&ldb->v_volu); + INIT_LIST_HEAD (&ldb->v_comp); + INIT_LIST_HEAD (&ldb->v_part); + + if (!ldm_get_vblks(state, base, ldb)) { + ldm_crit ("Failed to read the VBLKs from the database."); + goto cleanup; + } + + /* Finally, create the data partition devices. */ + if (ldm_create_data_partitions(state, ldb)) { + ldm_debug ("Parsed LDM database successfully."); + result = 1; + } + /* else Already logged */ + +cleanup: + ldm_free_vblks (&ldb->v_dgrp); + ldm_free_vblks (&ldb->v_disk); + ldm_free_vblks (&ldb->v_volu); + ldm_free_vblks (&ldb->v_comp); + ldm_free_vblks (&ldb->v_part); +out: + kfree (ldb); + return result; +} diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h new file mode 100644 index 000000000..374242c09 --- /dev/null +++ b/block/partitions/ldm.h @@ -0,0 +1,215 @@ +/** + * ldm - Part of the Linux-NTFS project. + * + * Copyright (C) 2001,2002 Richard Russon + * Copyright (c) 2001-2007 Anton Altaparmakov + * Copyright (C) 2001,2002 Jakob Kemi + * + * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program (in the main directory of the Linux-NTFS source + * in the file COPYING); if not, write to the Free Software Foundation, + * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _FS_PT_LDM_H_ +#define _FS_PT_LDM_H_ + +#include +#include +#include +#include +#include +#include + +struct parsed_partitions; + +/* Magic numbers in CPU format. */ +#define MAGIC_VMDB 0x564D4442 /* VMDB */ +#define MAGIC_VBLK 0x56424C4B /* VBLK */ +#define MAGIC_PRIVHEAD 0x5052495648454144ULL /* PRIVHEAD */ +#define MAGIC_TOCBLOCK 0x544F43424C4F434BULL /* TOCBLOCK */ + +/* The defined vblk types. 
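 *
 * [Editorial note, not part of the original patch: the MAGIC_* values above
 *  are simply the ASCII tags stored on disk, read big-endian -- for example
 *  0x56424C4B is 'V' 'B' 'L' 'K', which is what ldm_get_vblks() compares
 *  against get_unaligned_be32(data). The type codes below are the values
 *  ldm_parse_vblk() finds in the byte at offset 0x13 of each record.]
 *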
*/ +#define VBLK_VOL5 0x51 /* Volume, version 5 */ +#define VBLK_CMP3 0x32 /* Component, version 3 */ +#define VBLK_PRT3 0x33 /* Partition, version 3 */ +#define VBLK_DSK3 0x34 /* Disk, version 3 */ +#define VBLK_DSK4 0x44 /* Disk, version 4 */ +#define VBLK_DGR3 0x35 /* Disk Group, version 3 */ +#define VBLK_DGR4 0x45 /* Disk Group, version 4 */ + +/* vblk flags indicating extra information will be present */ +#define VBLK_FLAG_COMP_STRIPE 0x10 +#define VBLK_FLAG_PART_INDEX 0x08 +#define VBLK_FLAG_DGR3_IDS 0x08 +#define VBLK_FLAG_DGR4_IDS 0x08 +#define VBLK_FLAG_VOLU_ID1 0x08 +#define VBLK_FLAG_VOLU_ID2 0x20 +#define VBLK_FLAG_VOLU_SIZE 0x80 +#define VBLK_FLAG_VOLU_DRIVE 0x02 + +/* size of a vblk's static parts */ +#define VBLK_SIZE_HEAD 16 +#define VBLK_SIZE_CMP3 22 /* Name and version */ +#define VBLK_SIZE_DGR3 12 +#define VBLK_SIZE_DGR4 44 +#define VBLK_SIZE_DSK3 12 +#define VBLK_SIZE_DSK4 45 +#define VBLK_SIZE_PRT3 28 +#define VBLK_SIZE_VOL5 58 + +/* component types */ +#define COMP_STRIPE 0x01 /* Stripe-set */ +#define COMP_BASIC 0x02 /* Basic disk */ +#define COMP_RAID 0x03 /* Raid-set */ + +/* Other constants. */ +#define LDM_DB_SIZE 2048 /* Size in sectors (= 1MiB). */ + +#define OFF_PRIV1 6 /* Offset of the first privhead + relative to the start of the + device in sectors */ + +/* Offsets to structures within the LDM Database in sectors. */ +#define OFF_PRIV2 1856 /* Backup private headers. */ +#define OFF_PRIV3 2047 + +#define OFF_TOCB1 1 /* Tables of contents. */ +#define OFF_TOCB2 2 +#define OFF_TOCB3 2045 +#define OFF_TOCB4 2046 + +#define OFF_VMDB 17 /* List of partitions. */ + +#define LDM_PARTITION 0x42 /* Formerly SFS (Landis). */ + +#define TOC_BITMAP1 "config" /* Names of the two defined */ +#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */ + +/* Borrowed from msdos.c */ +#define SYS_IND(p) (get_unaligned(&(p)->sys_ind)) + +struct frag { /* VBLK Fragment handling */ + struct list_head list; + u32 group; + u8 num; /* Total number of records */ + u8 rec; /* This is record number n */ + u8 map; /* Which portions are in use */ + u8 data[0]; +}; + +/* In memory LDM database structures. */ + +#define GUID_SIZE 16 + +struct privhead { /* Offsets and sizes are in sectors. */ + u16 ver_major; + u16 ver_minor; + u64 logical_disk_start; + u64 logical_disk_size; + u64 config_start; + u64 config_size; + u8 disk_id[GUID_SIZE]; +}; + +struct tocblock { /* We have exactly two bitmaps. 
*/ + u8 bitmap1_name[16]; + u64 bitmap1_start; + u64 bitmap1_size; + u8 bitmap2_name[16]; + u64 bitmap2_start; + u64 bitmap2_size; +}; + +struct vmdb { /* VMDB: The database header */ + u16 ver_major; + u16 ver_minor; + u32 vblk_size; + u32 vblk_offset; + u32 last_vblk_seq; +}; + +struct vblk_comp { /* VBLK Component */ + u8 state[16]; + u64 parent_id; + u8 type; + u8 children; + u16 chunksize; +}; + +struct vblk_dgrp { /* VBLK Disk Group */ + u8 disk_id[64]; +}; + +struct vblk_disk { /* VBLK Disk */ + u8 disk_id[GUID_SIZE]; + u8 alt_name[128]; +}; + +struct vblk_part { /* VBLK Partition */ + u64 start; + u64 size; /* start, size and vol_off in sectors */ + u64 volume_offset; + u64 parent_id; + u64 disk_id; + u8 partnum; +}; + +struct vblk_volu { /* VBLK Volume */ + u8 volume_type[16]; + u8 volume_state[16]; + u8 guid[16]; + u8 drive_hint[4]; + u64 size; + u8 partition_type; +}; + +struct vblk_head { /* VBLK standard header */ + u32 group; + u16 rec; + u16 nrec; +}; + +struct vblk { /* Generalised VBLK */ + u8 name[64]; + u64 obj_id; + u32 sequence; + u8 flags; + u8 type; + union { + struct vblk_comp comp; + struct vblk_dgrp dgrp; + struct vblk_disk disk; + struct vblk_part part; + struct vblk_volu volu; + } vblk; + struct list_head list; +}; + +struct ldmdb { /* Cache of the database */ + struct privhead ph; + struct tocblock toc; + struct vmdb vm; + struct list_head v_dgrp; + struct list_head v_disk; + struct list_head v_volu; + struct list_head v_comp; + struct list_head v_part; +}; + +int ldm_partition(struct parsed_partitions *state); + +#endif /* _FS_PT_LDM_H_ */ + diff --git a/block/partitions/mac.c b/block/partitions/mac.c new file mode 100644 index 000000000..c2c48ec64 --- /dev/null +++ b/block/partitions/mac.c @@ -0,0 +1,138 @@ +/* + * fs/partitions/mac.c + * + * Code extracted from drivers/block/genhd.c + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + */ + +#include +#include "check.h" +#include "mac.h" + +#ifdef CONFIG_PPC_PMAC +#include +extern void note_bootable_part(dev_t dev, int part, int goodness); +#endif + +/* + * Code to understand MacOS partition tables. + */ + +static inline void mac_fix_string(char *stg, int len) +{ + int i; + + for (i = len - 1; i >= 0 && stg[i] == ' '; i--) + stg[i] = 0; +} + +int mac_partition(struct parsed_partitions *state) +{ + Sector sect; + unsigned char *data; + int slot, blocks_in_map; + unsigned secsize; +#ifdef CONFIG_PPC_PMAC + int found_root = 0; + int found_root_goodness = 0; +#endif + struct mac_partition *part; + struct mac_driver_desc *md; + + /* Get 0th block and look at the first partition map entry. 
*/ + md = read_part_sector(state, 0, §); + if (!md) + return -1; + if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) { + put_dev_sector(sect); + return 0; + } + secsize = be16_to_cpu(md->block_size); + put_dev_sector(sect); + data = read_part_sector(state, secsize/512, §); + if (!data) + return -1; + part = (struct mac_partition *) (data + secsize%512); + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { + put_dev_sector(sect); + return 0; /* not a MacOS disk */ + } + blocks_in_map = be32_to_cpu(part->map_count); + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { + put_dev_sector(sect); + return 0; + } + + if (blocks_in_map >= state->limit) + blocks_in_map = state->limit - 1; + + strlcat(state->pp_buf, " [mac]", PAGE_SIZE); + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; + put_dev_sector(sect); + data = read_part_sector(state, pos/512, §); + if (!data) + return -1; + part = (struct mac_partition *) (data + pos%512); + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) + break; + put_partition(state, slot, + be32_to_cpu(part->start_block) * (secsize/512), + be32_to_cpu(part->block_count) * (secsize/512)); + + if (!strncasecmp(part->type, "Linux_RAID", 10)) + state->parts[slot].flags = ADDPART_FLAG_RAID; +#ifdef CONFIG_PPC_PMAC + /* + * If this is the first bootable partition, tell the + * setup code, in case it wants to make this the root. + */ + if (machine_is(powermac)) { + int goodness = 0; + + mac_fix_string(part->processor, 16); + mac_fix_string(part->name, 32); + mac_fix_string(part->type, 32); + + if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE) + && strcasecmp(part->processor, "powerpc") == 0) + goodness++; + + if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0 + || (strncasecmp(part->type, "Linux", 5) == 0 + && strcasecmp(part->type, "Linux_swap") != 0)) { + int i, l; + + goodness++; + l = strlen(part->name); + if (strcmp(part->name, "/") == 0) + goodness++; + for (i = 0; i <= l - 4; ++i) { + if (strncasecmp(part->name + i, "root", + 4) == 0) { + goodness += 2; + break; + } + } + if (strncasecmp(part->name, "swap", 4) == 0) + goodness--; + } + + if (goodness > found_root_goodness) { + found_root = slot; + found_root_goodness = goodness; + } + } +#endif /* CONFIG_PPC_PMAC */ + } +#ifdef CONFIG_PPC_PMAC + if (found_root_goodness) + note_bootable_part(state->bdev->bd_dev, found_root, + found_root_goodness); +#endif + + put_dev_sector(sect); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; +} diff --git a/block/partitions/mac.h b/block/partitions/mac.h new file mode 100644 index 000000000..3c7d98436 --- /dev/null +++ b/block/partitions/mac.h @@ -0,0 +1,44 @@ +/* + * fs/partitions/mac.h + */ + +#define MAC_PARTITION_MAGIC 0x504d + +/* type field value for A/UX or other Unix partitions */ +#define APPLE_AUX_TYPE "Apple_UNIX_SVR2" + +struct mac_partition { + __be16 signature; /* expected to be MAC_PARTITION_MAGIC */ + __be16 res1; + __be32 map_count; /* # blocks in partition map */ + __be32 start_block; /* absolute starting block # of partition */ + __be32 block_count; /* number of blocks in partition */ + char name[32]; /* partition name */ + char type[32]; /* string type description */ + __be32 data_start; /* rel block # of first data block */ + __be32 data_count; /* number of data blocks */ + __be32 status; /* partition status bits */ + __be32 boot_start; + __be32 boot_size; + __be32 boot_load; + __be32 boot_load2; + __be32 boot_entry; + __be32 boot_entry2; + __be32 boot_cksum; + char processor[16]; /* identifies ISA 
of boot */ + /* there is more stuff after this that we don't need */ +}; + +#define MAC_STATUS_BOOTABLE 8 /* partition is bootable */ + +#define MAC_DRIVER_MAGIC 0x4552 + +/* Driver descriptor structure, in block 0 */ +struct mac_driver_desc { + __be16 signature; /* expected to be MAC_DRIVER_MAGIC */ + __be16 block_size; + __be32 block_count; + /* ... more stuff */ +}; + +int mac_partition(struct parsed_partitions *state); diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c new file mode 100644 index 000000000..93e7c1b32 --- /dev/null +++ b/block/partitions/msdos.c @@ -0,0 +1,582 @@ +/* + * fs/partitions/msdos.c + * + * Code extracted from drivers/block/genhd.c + * Copyright (C) 1991-1998 Linus Torvalds + * + * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug + * in the early extended-partition checks and added DM partitions + * + * Support for DiskManager v6.0x added by Mark Lord, + * with information provided by OnTrack. This now works for linux fdisk + * and LILO, as well as loadlin and bootln. Note that disks other than + * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1). + * + * More flexible handling of extended partitions - aeb, 950831 + * + * Check partition table on IDE disks for common CHS translations + * + * Re-organised Feb 1998 Russell King + */ +#include + +#include "check.h" +#include "msdos.h" +#include "efi.h" +#include "aix.h" + +/* + * Many architectures don't like unaligned accesses, while + * the nr_sects and start_sect partition table entries are + * at a 2 (mod 4) address. + */ +#include + +#define SYS_IND(p) get_unaligned(&p->sys_ind) + +static inline sector_t nr_sects(struct partition *p) +{ + return (sector_t)get_unaligned_le32(&p->nr_sects); +} + +static inline sector_t start_sect(struct partition *p) +{ + return (sector_t)get_unaligned_le32(&p->start_sect); +} + +static inline int is_extended_partition(struct partition *p) +{ + return (SYS_IND(p) == DOS_EXTENDED_PARTITION || + SYS_IND(p) == WIN98_EXTENDED_PARTITION || + SYS_IND(p) == LINUX_EXTENDED_PARTITION); +} + +#define MSDOS_LABEL_MAGIC1 0x55 +#define MSDOS_LABEL_MAGIC2 0xAA + +static inline int +msdos_magic_present(unsigned char *p) +{ + return (p[0] == MSDOS_LABEL_MAGIC1 && p[1] == MSDOS_LABEL_MAGIC2); +} + +/* Value is EBCDIC 'IBMA' */ +#define AIX_LABEL_MAGIC1 0xC9 +#define AIX_LABEL_MAGIC2 0xC2 +#define AIX_LABEL_MAGIC3 0xD4 +#define AIX_LABEL_MAGIC4 0xC1 +static int aix_magic_present(struct parsed_partitions *state, unsigned char *p) +{ + struct partition *pt = (struct partition *) (p + 0x1be); + Sector sect; + unsigned char *d; + int slot, ret = 0; + + if (!(p[0] == AIX_LABEL_MAGIC1 && + p[1] == AIX_LABEL_MAGIC2 && + p[2] == AIX_LABEL_MAGIC3 && + p[3] == AIX_LABEL_MAGIC4)) + return 0; + /* Assume the partition table is valid if Linux partitions exists */ + for (slot = 1; slot <= 4; slot++, pt++) { + if (pt->sys_ind == LINUX_SWAP_PARTITION || + pt->sys_ind == LINUX_RAID_PARTITION || + pt->sys_ind == LINUX_DATA_PARTITION || + pt->sys_ind == LINUX_LVM_PARTITION || + is_extended_partition(pt)) + return 0; + } + d = read_part_sector(state, 7, §); + if (d) { + if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M') + ret = 1; + put_dev_sector(sect); + } + return ret; +} + +static void set_info(struct parsed_partitions *state, int slot, + u32 disksig) +{ + struct partition_meta_info *info = &state->parts[slot].info; + + snprintf(info->uuid, sizeof(info->uuid), "%08x-%02x", disksig, + slot); + info->volname[0] = 0; + state->parts[slot].has_info = 
true; +} + +/* + * Create devices for each logical partition in an extended partition. + * The logical partitions form a linked list, with each entry being + * a partition table with two entries. The first entry + * is the real data partition (with a start relative to the partition + * table start). The second is a pointer to the next logical partition + * (with a start relative to the entire extended partition). + * We do not create a Linux partition for the partition tables, but + * only for the actual data partitions. + */ + +static void parse_extended(struct parsed_partitions *state, + sector_t first_sector, sector_t first_size, + u32 disksig) +{ + struct partition *p; + Sector sect; + unsigned char *data; + sector_t this_sector, this_size; + sector_t sector_size = bdev_logical_block_size(state->bdev) / 512; + int loopct = 0; /* number of links followed + without finding a data partition */ + int i; + + this_sector = first_sector; + this_size = first_size; + + while (1) { + if (++loopct > 100) + return; + if (state->next == state->limit) + return; + data = read_part_sector(state, this_sector, §); + if (!data) + return; + + if (!msdos_magic_present(data + 510)) + goto done; + + p = (struct partition *) (data + 0x1be); + + /* + * Usually, the first entry is the real data partition, + * the 2nd entry is the next extended partition, or empty, + * and the 3rd and 4th entries are unused. + * However, DRDOS sometimes has the extended partition as + * the first entry (when the data partition is empty), + * and OS/2 seems to use all four entries. + */ + + /* + * First process the data partition(s) + */ + for (i = 0; i < 4; i++, p++) { + sector_t offs, size, next; + + if (!nr_sects(p) || is_extended_partition(p)) + continue; + + /* Check the 3rd and 4th entries - + these sometimes contain random garbage */ + offs = start_sect(p)*sector_size; + size = nr_sects(p)*sector_size; + next = this_sector + offs; + if (i >= 2) { + if (offs + size > this_size) + continue; + if (next < first_sector) + continue; + if (next + size > first_sector + first_size) + continue; + } + + put_partition(state, state->next, next, size); + set_info(state, state->next, disksig); + if (SYS_IND(p) == LINUX_RAID_PARTITION) + state->parts[state->next].flags = ADDPART_FLAG_RAID; + loopct = 0; + if (++state->next == state->limit) + goto done; + } + /* + * Next, process the (first) extended partition, if present. + * (So far, there seems to be no reason to make + * parse_extended() recursive and allow a tree + * of extended partitions.) + * It should be a link to the next logical partition. + */ + p -= 4; + for (i = 0; i < 4; i++, p++) + if (nr_sects(p) && is_extended_partition(p)) + break; + if (i == 4) + goto done; /* nothing left to do */ + + this_sector = first_sector + start_sect(p) * sector_size; + this_size = nr_sects(p) * sector_size; + put_dev_sector(sect); + } +done: + put_dev_sector(sect); +} + +/* james@bpgc.com: Solaris has a nasty indicator: 0x82 which also + indicates linux swap. Be careful before believing this is Solaris. 
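 *
 * [Editorial note, not part of the original patch: a short summary of the
 *  offset rules parse_extended() above relies on. Each link sector is a tiny
 *  MBR of its own: a data entry's start_sect is relative to that link sector
 *  (next = this_sector + offs), while the entry pointing at the next link is
 *  relative to the start of the whole extended container
 *  (this_sector = first_sector + start_sect(p) * sector_size). The extra
 *  range checks applied to entries 3 and 4 exist because, as noted above,
 *  those slots sometimes contain random garbage.]
 *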
*/ + +static void parse_solaris_x86(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin) +{ +#ifdef CONFIG_SOLARIS_X86_PARTITION + Sector sect; + struct solaris_x86_vtoc *v; + int i; + short max_nparts; + + v = read_part_sector(state, offset + 1, §); + if (!v) + return; + if (le32_to_cpu(v->v_sanity) != SOLARIS_X86_VTOC_SANE) { + put_dev_sector(sect); + return; + } + { + char tmp[1 + BDEVNAME_SIZE + 10 + 11 + 1]; + + snprintf(tmp, sizeof(tmp), " %s%d: name, origin); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + } + if (le32_to_cpu(v->v_version) != 1) { + char tmp[64]; + + snprintf(tmp, sizeof(tmp), " cannot handle version %d vtoc>\n", + le32_to_cpu(v->v_version)); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + put_dev_sector(sect); + return; + } + /* Ensure we can handle previous case of VTOC with 8 entries gracefully */ + max_nparts = le16_to_cpu(v->v_nparts) > 8 ? SOLARIS_X86_NUMSLICE : 8; + for (i = 0; i < max_nparts && state->next < state->limit; i++) { + struct solaris_x86_slice *s = &v->v_slice[i]; + char tmp[3 + 10 + 1 + 1]; + + if (s->s_size == 0) + continue; + snprintf(tmp, sizeof(tmp), " [s%d]", i); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + /* solaris partitions are relative to current MS-DOS + * one; must add the offset of the current partition */ + put_partition(state, state->next++, + le32_to_cpu(s->s_start)+offset, + le32_to_cpu(s->s_size)); + } + put_dev_sector(sect); + strlcat(state->pp_buf, " >\n", PAGE_SIZE); +#endif +} + +#if defined(CONFIG_BSD_DISKLABEL) +/* + * Create devices for BSD partitions listed in a disklabel, under a + * dos-like partition. See parse_extended() for more information. + */ +static void parse_bsd(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin, char *flavour, + int max_partitions) +{ + Sector sect; + struct bsd_disklabel *l; + struct bsd_partition *p; + char tmp[64]; + + l = read_part_sector(state, offset + 1, §); + if (!l) + return; + if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) { + put_dev_sector(sect); + return; + } + + snprintf(tmp, sizeof(tmp), " %s%d: <%s:", state->name, origin, flavour); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + + if (le16_to_cpu(l->d_npartitions) < max_partitions) + max_partitions = le16_to_cpu(l->d_npartitions); + for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) { + sector_t bsd_start, bsd_size; + + if (state->next == state->limit) + break; + if (p->p_fstype == BSD_FS_UNUSED) + continue; + bsd_start = le32_to_cpu(p->p_offset); + bsd_size = le32_to_cpu(p->p_size); + if (offset == bsd_start && size == bsd_size) + /* full parent partition, we have it already */ + continue; + if (offset > bsd_start || offset+size < bsd_start+bsd_size) { + strlcat(state->pp_buf, "bad subpartition - ignored\n", PAGE_SIZE); + continue; + } + put_partition(state, state->next++, bsd_start, bsd_size); + } + put_dev_sector(sect); + if (le16_to_cpu(l->d_npartitions) > max_partitions) { + snprintf(tmp, sizeof(tmp), " (ignored %d more)", + le16_to_cpu(l->d_npartitions) - max_partitions); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + } + strlcat(state->pp_buf, " >\n", PAGE_SIZE); +} +#endif + +static void parse_freebsd(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin) +{ +#ifdef CONFIG_BSD_DISKLABEL + parse_bsd(state, offset, size, origin, "bsd", BSD_MAXPARTITIONS); +#endif +} + +static void parse_netbsd(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin) +{ +#ifdef CONFIG_BSD_DISKLABEL + parse_bsd(state, offset, size, 
origin, "netbsd", BSD_MAXPARTITIONS); +#endif +} + +static void parse_openbsd(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin) +{ +#ifdef CONFIG_BSD_DISKLABEL + parse_bsd(state, offset, size, origin, "openbsd", + OPENBSD_MAXPARTITIONS); +#endif +} + +/* + * Create devices for Unixware partitions listed in a disklabel, under a + * dos-like partition. See parse_extended() for more information. + */ +static void parse_unixware(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin) +{ +#ifdef CONFIG_UNIXWARE_DISKLABEL + Sector sect; + struct unixware_disklabel *l; + struct unixware_slice *p; + + l = read_part_sector(state, offset + 29, §); + if (!l) + return; + if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC || + le32_to_cpu(l->vtoc.v_magic) != UNIXWARE_DISKMAGIC2) { + put_dev_sector(sect); + return; + } + { + char tmp[1 + BDEVNAME_SIZE + 10 + 12 + 1]; + + snprintf(tmp, sizeof(tmp), " %s%d: name, origin); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + } + p = &l->vtoc.v_slice[1]; + /* I omit the 0th slice as it is the same as whole disk. */ + while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) { + if (state->next == state->limit) + break; + + if (p->s_label != UNIXWARE_FS_UNUSED) + put_partition(state, state->next++, + le32_to_cpu(p->start_sect), + le32_to_cpu(p->nr_sects)); + p++; + } + put_dev_sector(sect); + strlcat(state->pp_buf, " >\n", PAGE_SIZE); +#endif +} + +/* + * Minix 2.0.0/2.0.2 subpartition support. + * Anand Krishnamurthy + * Rajeev V. Pillai + */ +static void parse_minix(struct parsed_partitions *state, + sector_t offset, sector_t size, int origin) +{ +#ifdef CONFIG_MINIX_SUBPARTITION + Sector sect; + unsigned char *data; + struct partition *p; + int i; + + data = read_part_sector(state, offset, §); + if (!data) + return; + + p = (struct partition *)(data + 0x1be); + + /* The first sector of a Minix partition can have either + * a secondary MBR describing its subpartitions, or + * the normal boot sector. */ + if (msdos_magic_present(data + 510) && + SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */ + char tmp[1 + BDEVNAME_SIZE + 10 + 9 + 1]; + + snprintf(tmp, sizeof(tmp), " %s%d: name, origin); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + for (i = 0; i < MINIX_NR_SUBPARTITIONS; i++, p++) { + if (state->next == state->limit) + break; + /* add each partition in use */ + if (SYS_IND(p) == MINIX_PARTITION) + put_partition(state, state->next++, + start_sect(p), nr_sects(p)); + } + strlcat(state->pp_buf, " >\n", PAGE_SIZE); + } + put_dev_sector(sect); +#endif /* CONFIG_MINIX_SUBPARTITION */ +} + +static struct { + unsigned char id; + void (*parse)(struct parsed_partitions *, sector_t, sector_t, int); +} subtypes[] = { + {FREEBSD_PARTITION, parse_freebsd}, + {NETBSD_PARTITION, parse_netbsd}, + {OPENBSD_PARTITION, parse_openbsd}, + {MINIX_PARTITION, parse_minix}, + {UNIXWARE_PARTITION, parse_unixware}, + {SOLARIS_X86_PARTITION, parse_solaris_x86}, + {NEW_SOLARIS_X86_PARTITION, parse_solaris_x86}, + {0, NULL}, +}; + +int msdos_partition(struct parsed_partitions *state) +{ + sector_t sector_size = bdev_logical_block_size(state->bdev) / 512; + Sector sect; + unsigned char *data; + struct partition *p; + struct fat_boot_sector *fb; + int slot; + u32 disksig; + + data = read_part_sector(state, 0, §); + if (!data) + return -1; + + /* + * Note order! (some AIX disks, e.g. 
unbootable kind, + * have no MSDOS 55aa) + */ + if (aix_magic_present(state, data)) { + put_dev_sector(sect); +#ifdef CONFIG_AIX_PARTITION + return aix_partition(state); +#else + strlcat(state->pp_buf, " [AIX]", PAGE_SIZE); + return 0; +#endif + } + + if (!msdos_magic_present(data + 510)) { + put_dev_sector(sect); + return 0; + } + + /* + * Now that the 55aa signature is present, this is probably + * either the boot sector of a FAT filesystem or a DOS-type + * partition table. Reject this in case the boot indicator + * is not 0 or 0x80. + */ + p = (struct partition *) (data + 0x1be); + for (slot = 1; slot <= 4; slot++, p++) { + if (p->boot_ind != 0 && p->boot_ind != 0x80) { + /* + * Even without a valid boot inidicator value + * its still possible this is valid FAT filesystem + * without a partition table. + */ + fb = (struct fat_boot_sector *) data; + if (slot == 1 && fb->reserved && fb->fats + && fat_valid_media(fb->media)) { + strlcat(state->pp_buf, "\n", PAGE_SIZE); + put_dev_sector(sect); + return 1; + } else { + put_dev_sector(sect); + return 0; + } + } + } + +#ifdef CONFIG_EFI_PARTITION + p = (struct partition *) (data + 0x1be); + for (slot = 1 ; slot <= 4 ; slot++, p++) { + /* If this is an EFI GPT disk, msdos should ignore it. */ + if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) { + put_dev_sector(sect); + return 0; + } + } +#endif + p = (struct partition *) (data + 0x1be); + + disksig = le32_to_cpup((__le32 *)(data + 0x1b8)); + + /* + * Look for partitions in two passes: + * First find the primary and DOS-type extended partitions. + * On the second pass look inside *BSD, Unixware and Solaris partitions. + */ + + state->next = 5; + for (slot = 1 ; slot <= 4 ; slot++, p++) { + sector_t start = start_sect(p)*sector_size; + sector_t size = nr_sects(p)*sector_size; + + if (!size) + continue; + if (is_extended_partition(p)) { + /* + * prevent someone doing mkfs or mkswap on an + * extended partition, but leave room for LILO + * FIXME: this uses one logical sector for > 512b + * sector, although it may not be enough/proper. 
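 *
 * [Editorial note, not part of the original patch: the extended container
 *  itself is therefore registered only as a tiny placeholder -- n below is
 *  clamped to min(size, max(sector_size, 2)) sectors -- while the logical
 *  partitions it holds are created by parse_extended() and numbered from 5
 *  upwards (state->next = 5 above), matching the familiar sda5, sda6, ...
 *  naming.]
 *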
+ */ + sector_t n = 2; + + n = min(size, max(sector_size, n)); + put_partition(state, slot, start, n); + + strlcat(state->pp_buf, " <", PAGE_SIZE); + parse_extended(state, start, size, disksig); + strlcat(state->pp_buf, " >", PAGE_SIZE); + continue; + } + put_partition(state, slot, start, size); + set_info(state, slot, disksig); + if (SYS_IND(p) == LINUX_RAID_PARTITION) + state->parts[slot].flags = ADDPART_FLAG_RAID; + if (SYS_IND(p) == DM6_PARTITION) + strlcat(state->pp_buf, "[DM]", PAGE_SIZE); + if (SYS_IND(p) == EZD_PARTITION) + strlcat(state->pp_buf, "[EZD]", PAGE_SIZE); + } + + strlcat(state->pp_buf, "\n", PAGE_SIZE); + + /* second pass - output for each on a separate line */ + p = (struct partition *) (0x1be + data); + for (slot = 1 ; slot <= 4 ; slot++, p++) { + unsigned char id = SYS_IND(p); + int n; + + if (!nr_sects(p)) + continue; + + for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++) + ; + + if (!subtypes[n].parse) + continue; + subtypes[n].parse(state, start_sect(p) * sector_size, + nr_sects(p) * sector_size, slot); + } + put_dev_sector(sect); + return 1; +} diff --git a/block/partitions/msdos.h b/block/partitions/msdos.h new file mode 100644 index 000000000..38c781c49 --- /dev/null +++ b/block/partitions/msdos.h @@ -0,0 +1,8 @@ +/* + * fs/partitions/msdos.h + */ + +#define MSDOS_LABEL_MAGIC 0xAA55 + +int msdos_partition(struct parsed_partitions *state); + diff --git a/block/partitions/osf.c b/block/partitions/osf.c new file mode 100644 index 000000000..764b86a01 --- /dev/null +++ b/block/partitions/osf.c @@ -0,0 +1,86 @@ +/* + * fs/partitions/osf.c + * + * Code extracted from drivers/block/genhd.c + * + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + */ + +#include "check.h" +#include "osf.h" + +#define MAX_OSF_PARTITIONS 18 + +int osf_partition(struct parsed_partitions *state) +{ + int i; + int slot = 1; + unsigned int npartitions; + Sector sect; + unsigned char *data; + struct disklabel { + __le32 d_magic; + __le16 d_type,d_subtype; + u8 d_typename[16]; + u8 d_packname[16]; + __le32 d_secsize; + __le32 d_nsectors; + __le32 d_ntracks; + __le32 d_ncylinders; + __le32 d_secpercyl; + __le32 d_secprtunit; + __le16 d_sparespertrack; + __le16 d_sparespercyl; + __le32 d_acylinders; + __le16 d_rpm, d_interleave, d_trackskew, d_cylskew; + __le32 d_headswitch, d_trkseek, d_flags; + __le32 d_drivedata[5]; + __le32 d_spare[5]; + __le32 d_magic2; + __le16 d_checksum; + __le16 d_npartitions; + __le32 d_bbsize, d_sbsize; + struct d_partition { + __le32 p_size; + __le32 p_offset; + __le32 p_fsize; + u8 p_fstype; + u8 p_frag; + __le16 p_cpg; + } d_partitions[MAX_OSF_PARTITIONS]; + } * label; + struct d_partition * partition; + + data = read_part_sector(state, 0, §); + if (!data) + return -1; + + label = (struct disklabel *) (data+64); + partition = label->d_partitions; + if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) { + put_dev_sector(sect); + return 0; + } + if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) { + put_dev_sector(sect); + return 0; + } + npartitions = le16_to_cpu(label->d_npartitions); + if (npartitions > MAX_OSF_PARTITIONS) { + put_dev_sector(sect); + return 0; + } + for (i = 0 ; i < npartitions; i++, partition++) { + if (slot == state->limit) + break; + if (le32_to_cpu(partition->p_size)) + put_partition(state, slot, + le32_to_cpu(partition->p_offset), + le32_to_cpu(partition->p_size)); + slot++; + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + put_dev_sector(sect); + return 1; +} diff --git a/block/partitions/osf.h 
b/block/partitions/osf.h new file mode 100644 index 000000000..20ed2315e --- /dev/null +++ b/block/partitions/osf.h @@ -0,0 +1,7 @@ +/* + * fs/partitions/osf.h + */ + +#define DISKLABELMAGIC (0x82564557UL) + +int osf_partition(struct parsed_partitions *state); diff --git a/block/partitions/sgi.c b/block/partitions/sgi.c new file mode 100644 index 000000000..ea8a86dce --- /dev/null +++ b/block/partitions/sgi.c @@ -0,0 +1,82 @@ +/* + * fs/partitions/sgi.c + * + * Code extracted from drivers/block/genhd.c + */ + +#include "check.h" +#include "sgi.h" + +struct sgi_disklabel { + __be32 magic_mushroom; /* Big fat spliff... */ + __be16 root_part_num; /* Root partition number */ + __be16 swap_part_num; /* Swap partition number */ + s8 boot_file[16]; /* Name of boot file for ARCS */ + u8 _unused0[48]; /* Device parameter useless crapola.. */ + struct sgi_volume { + s8 name[8]; /* Name of volume */ + __be32 block_num; /* Logical block number */ + __be32 num_bytes; /* How big, in bytes */ + } volume[15]; + struct sgi_partition { + __be32 num_blocks; /* Size in logical blocks */ + __be32 first_block; /* First logical block */ + __be32 type; /* Type of this partition */ + } partitions[16]; + __be32 csum; /* Disk label checksum */ + __be32 _unused1; /* Padding */ +}; + +int sgi_partition(struct parsed_partitions *state) +{ + int i, csum; + __be32 magic; + int slot = 1; + unsigned int start, blocks; + __be32 *ui, cs; + Sector sect; + struct sgi_disklabel *label; + struct sgi_partition *p; + char b[BDEVNAME_SIZE]; + + label = read_part_sector(state, 0, §); + if (!label) + return -1; + p = &label->partitions[0]; + magic = label->magic_mushroom; + if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) { + /*printk("Dev %s SGI disklabel: bad magic %08x\n", + bdevname(bdev, b), be32_to_cpu(magic));*/ + put_dev_sector(sect); + return 0; + } + ui = ((__be32 *) (label + 1)) - 1; + for(csum = 0; ui >= ((__be32 *) label);) { + cs = *ui--; + csum += be32_to_cpu(cs); + } + if(csum) { + printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n", + bdevname(state->bdev, b)); + put_dev_sector(sect); + return 0; + } + /* All SGI disk labels have 16 partitions, disks under Linux only + * have 15 minor's. Luckily there are always a few zero length + * partitions which we don't care about so we never overflow the + * current_minor. 
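 *
 * [Editorial note, not part of the original patch: the checksum loop above
 *  walks the label backwards one __be32 at a time and adds everything up,
 *  including the csum field itself, so a valid label sums to zero modulo
 *  2^32; in other words csum holds the negated sum of the rest of the label.
 *  The Sun label handled in sun.c below uses the same trick with a 16-bit
 *  XOR instead of a 32-bit sum.]
 *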
+ */ + for(i = 0; i < 16; i++, p++) { + blocks = be32_to_cpu(p->num_blocks); + start = be32_to_cpu(p->first_block); + if (blocks) { + put_partition(state, slot, start, blocks); + if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION) + state->parts[slot].flags = ADDPART_FLAG_RAID; + } + slot++; + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + put_dev_sector(sect); + return 1; +} diff --git a/block/partitions/sgi.h b/block/partitions/sgi.h new file mode 100644 index 000000000..b9553ebdd --- /dev/null +++ b/block/partitions/sgi.h @@ -0,0 +1,8 @@ +/* + * fs/partitions/sgi.h + */ + +extern int sgi_partition(struct parsed_partitions *state); + +#define SGI_LABEL_MAGIC 0x0be5a941 + diff --git a/block/partitions/sun.c b/block/partitions/sun.c new file mode 100644 index 000000000..b5b6fcfb3 --- /dev/null +++ b/block/partitions/sun.c @@ -0,0 +1,122 @@ +/* + * fs/partitions/sun.c + * + * Code extracted from drivers/block/genhd.c + * + * Copyright (C) 1991-1998 Linus Torvalds + * Re-organised Feb 1998 Russell King + */ + +#include "check.h" +#include "sun.h" + +int sun_partition(struct parsed_partitions *state) +{ + int i; + __be16 csum; + int slot = 1; + __be16 *ush; + Sector sect; + struct sun_disklabel { + unsigned char info[128]; /* Informative text string */ + struct sun_vtoc { + __be32 version; /* Layout version */ + char volume[8]; /* Volume name */ + __be16 nparts; /* Number of partitions */ + struct sun_info { /* Partition hdrs, sec 2 */ + __be16 id; + __be16 flags; + } infos[8]; + __be16 padding; /* Alignment padding */ + __be32 bootinfo[3]; /* Info needed by mboot */ + __be32 sanity; /* To verify vtoc sanity */ + __be32 reserved[10]; /* Free space */ + __be32 timestamp[8]; /* Partition timestamp */ + } vtoc; + __be32 write_reinstruct; /* sectors to skip, writes */ + __be32 read_reinstruct; /* sectors to skip, reads */ + unsigned char spare[148]; /* Padding */ + __be16 rspeed; /* Disk rotational speed */ + __be16 pcylcount; /* Physical cylinder count */ + __be16 sparecyl; /* extra sects per cylinder */ + __be16 obs1; /* gap1 */ + __be16 obs2; /* gap2 */ + __be16 ilfact; /* Interleave factor */ + __be16 ncyl; /* Data cylinder count */ + __be16 nacyl; /* Alt. cylinder count */ + __be16 ntrks; /* Tracks per cylinder */ + __be16 nsect; /* Sectors per track */ + __be16 obs3; /* bhead - Label head offset */ + __be16 obs4; /* ppart - Physical Partition */ + struct sun_partition { + __be32 start_cylinder; + __be32 num_sectors; + } partitions[8]; + __be16 magic; /* Magic number */ + __be16 csum; /* Label xor'd checksum */ + } * label; + struct sun_partition *p; + unsigned long spc; + char b[BDEVNAME_SIZE]; + int use_vtoc; + int nparts; + + label = read_part_sector(state, 0, §); + if (!label) + return -1; + + p = label->partitions; + if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) { +/* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n", + bdevname(bdev, b), be16_to_cpu(label->magic)); */ + put_dev_sector(sect); + return 0; + } + /* Look at the checksum */ + ush = ((__be16 *) (label+1)) - 1; + for (csum = 0; ush >= ((__be16 *) label);) + csum ^= *ush--; + if (csum) { + printk("Dev %s Sun disklabel: Csum bad, label corrupted\n", + bdevname(state->bdev, b)); + put_dev_sector(sect); + return 0; + } + + /* Check to see if we can use the VTOC table */ + use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) && + (be32_to_cpu(label->vtoc.version) == 1) && + (be16_to_cpu(label->vtoc.nparts) <= 8)); + + /* Use 8 partition entries if not specified in validated VTOC */ + nparts = (use_vtoc) ? 
be16_to_cpu(label->vtoc.nparts) : 8; + + /* + * So that old Linux-Sun partitions continue to work, + * alow the VTOC to be used under the additional condition ... + */ + use_vtoc = use_vtoc || !(label->vtoc.sanity || + label->vtoc.version || label->vtoc.nparts); + spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); + for (i = 0; i < nparts; i++, p++) { + unsigned long st_sector; + unsigned int num_sectors; + + st_sector = be32_to_cpu(p->start_cylinder) * spc; + num_sectors = be32_to_cpu(p->num_sectors); + if (num_sectors) { + put_partition(state, slot, st_sector, num_sectors); + state->parts[slot].flags = 0; + if (use_vtoc) { + if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION) + state->parts[slot].flags |= ADDPART_FLAG_RAID; + else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK) + state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK; + } + } + slot++; + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + put_dev_sector(sect); + return 1; +} diff --git a/block/partitions/sun.h b/block/partitions/sun.h new file mode 100644 index 000000000..2424baa83 --- /dev/null +++ b/block/partitions/sun.h @@ -0,0 +1,8 @@ +/* + * fs/partitions/sun.h + */ + +#define SUN_LABEL_MAGIC 0xDABE +#define SUN_VTOC_SANITY 0x600DDEEE + +int sun_partition(struct parsed_partitions *state); diff --git a/block/partitions/sysv68.c b/block/partitions/sysv68.c new file mode 100644 index 000000000..9627ccffc --- /dev/null +++ b/block/partitions/sysv68.c @@ -0,0 +1,95 @@ +/* + * fs/partitions/sysv68.c + * + * Copyright (C) 2007 Philippe De Muyter + */ + +#include "check.h" +#include "sysv68.h" + +/* + * Volume ID structure: on first 256-bytes sector of disk + */ + +struct volumeid { + u8 vid_unused[248]; + u8 vid_mac[8]; /* ASCII string "MOTOROLA" */ +}; + +/* + * config block: second 256-bytes sector on disk + */ + +struct dkconfig { + u8 ios_unused0[128]; + __be32 ios_slcblk; /* Slice table block number */ + __be16 ios_slccnt; /* Number of entries in slice table */ + u8 ios_unused1[122]; +}; + +/* + * combined volumeid and dkconfig block + */ + +struct dkblk0 { + struct volumeid dk_vid; + struct dkconfig dk_ios; +}; + +/* + * Slice Table Structure + */ + +struct slice { + __be32 nblocks; /* slice size (in blocks) */ + __be32 blkoff; /* block offset of slice */ +}; + + +int sysv68_partition(struct parsed_partitions *state) +{ + int i, slices; + int slot = 1; + Sector sect; + unsigned char *data; + struct dkblk0 *b; + struct slice *slice; + char tmp[64]; + + data = read_part_sector(state, 0, §); + if (!data) + return -1; + + b = (struct dkblk0 *)data; + if (memcmp(b->dk_vid.vid_mac, "MOTOROLA", sizeof(b->dk_vid.vid_mac))) { + put_dev_sector(sect); + return 0; + } + slices = be16_to_cpu(b->dk_ios.ios_slccnt); + i = be32_to_cpu(b->dk_ios.ios_slcblk); + put_dev_sector(sect); + + data = read_part_sector(state, i, §); + if (!data) + return -1; + + slices -= 1; /* last slice is the whole disk */ + snprintf(tmp, sizeof(tmp), "sysV68: %s(s%u)", state->name, slices); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + slice = (struct slice *)data; + for (i = 0; i < slices; i++, slice++) { + if (slot == state->limit) + break; + if (be32_to_cpu(slice->nblocks)) { + put_partition(state, slot, + be32_to_cpu(slice->blkoff), + be32_to_cpu(slice->nblocks)); + snprintf(tmp, sizeof(tmp), "(s%u)", i); + strlcat(state->pp_buf, tmp, PAGE_SIZE); + } + slot++; + } + strlcat(state->pp_buf, "\n", PAGE_SIZE); + put_dev_sector(sect); + return 1; +} diff --git a/block/partitions/sysv68.h b/block/partitions/sysv68.h new file mode 
100644 index 000000000..bf2f5ffa9 --- /dev/null +++ b/block/partitions/sysv68.h @@ -0,0 +1 @@ +extern int sysv68_partition(struct parsed_partitions *state); diff --git a/block/partitions/ultrix.c b/block/partitions/ultrix.c new file mode 100644 index 000000000..8dbaf9f77 --- /dev/null +++ b/block/partitions/ultrix.c @@ -0,0 +1,48 @@ +/* + * fs/partitions/ultrix.c + * + * Code extracted from drivers/block/genhd.c + * + * Re-organised Jul 1999 Russell King + */ + +#include "check.h" +#include "ultrix.h" + +int ultrix_partition(struct parsed_partitions *state) +{ + int i; + Sector sect; + unsigned char *data; + struct ultrix_disklabel { + s32 pt_magic; /* magic no. indicating part. info exits */ + s32 pt_valid; /* set by driver if pt is current */ + struct pt_info { + s32 pi_nblocks; /* no. of sectors */ + u32 pi_blkoff; /* block offset for start */ + } pt_part[8]; + } *label; + +#define PT_MAGIC 0x032957 /* Partition magic number */ +#define PT_VALID 1 /* Indicates if struct is valid */ + + data = read_part_sector(state, (16384 - sizeof(*label))/512, §); + if (!data) + return -1; + + label = (struct ultrix_disklabel *)(data + 512 - sizeof(*label)); + + if (label->pt_magic == PT_MAGIC && label->pt_valid == PT_VALID) { + for (i=0; i<8; i++) + if (label->pt_part[i].pi_nblocks) + put_partition(state, i+1, + label->pt_part[i].pi_blkoff, + label->pt_part[i].pi_nblocks); + put_dev_sector(sect); + strlcat(state->pp_buf, "\n", PAGE_SIZE); + return 1; + } else { + put_dev_sector(sect); + return 0; + } +} diff --git a/block/partitions/ultrix.h b/block/partitions/ultrix.h new file mode 100644 index 000000000..a3cc00b2b --- /dev/null +++ b/block/partitions/ultrix.h @@ -0,0 +1,5 @@ +/* + * fs/partitions/ultrix.h + */ + +int ultrix_partition(struct parsed_partitions *state); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c new file mode 100644 index 000000000..55b6f15da --- /dev/null +++ b/block/scsi_ioctl.c @@ -0,0 +1,748 @@ +/* + * Copyright (C) 2001 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +struct blk_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; +}; + +static struct blk_cmd_filter blk_default_cmd_filter; + +/* Command group 3 is reserved and should never be used. 
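 *
 * [Editorial note, not part of the original patch: the 8-entry table below is
 *  indexed by the SCSI command group, conventionally the top three bits of
 *  the opcode, which is how COMMAND_SIZE() is applied later in
 *  sg_scsi_ioctl(). Illustratively:
 *
 *      size_t cdb_len = scsi_command_size_tbl[(opcode >> 5) & 7];
 *
 *  so READ(10), opcode 0x28, falls in group 1 and gets a 10-byte CDB.]
 *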
*/ +const unsigned char scsi_command_size_tbl[8] = +{ + 6, 10, 10, 12, + 16, 12, 10, 10 +}; +EXPORT_SYMBOL(scsi_command_size_tbl); + +#include + +static int sg_get_version(int __user *p) +{ + static const int sg_version_num = 30527; + return put_user(sg_version_num, p); +} + +static int scsi_get_idlun(struct request_queue *q, int __user *p) +{ + return put_user(0, p); +} + +static int scsi_get_bus(struct request_queue *q, int __user *p) +{ + return put_user(0, p); +} + +static int sg_get_timeout(struct request_queue *q) +{ + return jiffies_to_clock_t(q->sg_timeout); +} + +static int sg_set_timeout(struct request_queue *q, int __user *p) +{ + int timeout, err = get_user(timeout, p); + + if (!err) + q->sg_timeout = clock_t_to_jiffies(timeout); + + return err; +} + +static int max_sectors_bytes(struct request_queue *q) +{ + unsigned int max_sectors = queue_max_sectors(q); + + max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); + + return max_sectors << 9; +} + +static int sg_get_reserved_size(struct request_queue *q, int __user *p) +{ + int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q)); + + return put_user(val, p); +} + +static int sg_set_reserved_size(struct request_queue *q, int __user *p) +{ + int size, err = get_user(size, p); + + if (err) + return err; + + if (size < 0) + return -EINVAL; + + q->sg_reserved_size = min(size, max_sectors_bytes(q)); + return 0; +} + +/* + * will always return that we are ATAPI even for a real SCSI drive, I'm not + * so sure this is worth doing anything about (why would you care??) + */ +static int sg_emulated_host(struct request_queue *q, int __user *p) +{ + return put_user(1, p); +} + +static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) +{ + /* Basic read-only commands */ + __set_bit(TEST_UNIT_READY, filter->read_ok); + __set_bit(REQUEST_SENSE, filter->read_ok); + __set_bit(READ_6, filter->read_ok); + __set_bit(READ_10, filter->read_ok); + __set_bit(READ_12, filter->read_ok); + __set_bit(READ_16, filter->read_ok); + __set_bit(READ_BUFFER, filter->read_ok); + __set_bit(READ_DEFECT_DATA, filter->read_ok); + __set_bit(READ_CAPACITY, filter->read_ok); + __set_bit(READ_LONG, filter->read_ok); + __set_bit(INQUIRY, filter->read_ok); + __set_bit(MODE_SENSE, filter->read_ok); + __set_bit(MODE_SENSE_10, filter->read_ok); + __set_bit(LOG_SENSE, filter->read_ok); + __set_bit(START_STOP, filter->read_ok); + __set_bit(GPCMD_VERIFY_10, filter->read_ok); + __set_bit(VERIFY_16, filter->read_ok); + __set_bit(REPORT_LUNS, filter->read_ok); + __set_bit(SERVICE_ACTION_IN_16, filter->read_ok); + __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok); + __set_bit(MAINTENANCE_IN, filter->read_ok); + __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok); + + /* Audio CD commands */ + __set_bit(GPCMD_PLAY_CD, filter->read_ok); + __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok); + __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok); + __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok); + __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok); + + /* CD/DVD data reading */ + __set_bit(GPCMD_READ_CD, filter->read_ok); + __set_bit(GPCMD_READ_CD_MSF, filter->read_ok); + __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok); + __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok); + __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok); + __set_bit(GPCMD_READ_HEADER, filter->read_ok); + __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok); + __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok); + __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok); + 
__set_bit(GPCMD_REPORT_KEY, filter->read_ok); + __set_bit(GPCMD_SCAN, filter->read_ok); + __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok); + __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok); + __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok); + __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok); + __set_bit(GPCMD_SEEK, filter->read_ok); + __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok); + + /* Basic writing commands */ + __set_bit(WRITE_6, filter->write_ok); + __set_bit(WRITE_10, filter->write_ok); + __set_bit(WRITE_VERIFY, filter->write_ok); + __set_bit(WRITE_12, filter->write_ok); + __set_bit(WRITE_VERIFY_12, filter->write_ok); + __set_bit(WRITE_16, filter->write_ok); + __set_bit(WRITE_LONG, filter->write_ok); + __set_bit(WRITE_LONG_2, filter->write_ok); + __set_bit(ERASE, filter->write_ok); + __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); + __set_bit(MODE_SELECT, filter->write_ok); + __set_bit(LOG_SELECT, filter->write_ok); + __set_bit(GPCMD_BLANK, filter->write_ok); + __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok); + __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok); + __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok); + __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok); + __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok); + __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok); + __set_bit(GPCMD_SEND_EVENT, filter->write_ok); + __set_bit(GPCMD_SEND_KEY, filter->write_ok); + __set_bit(GPCMD_SEND_OPC, filter->write_ok); + __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok); + __set_bit(GPCMD_SET_SPEED, filter->write_ok); + __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok); + __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok); + __set_bit(GPCMD_SET_STREAMING, filter->write_ok); + __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok); +} + +int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm) +{ + struct blk_cmd_filter *filter = &blk_default_cmd_filter; + + /* root can do any command. 
*/ + if (capable(CAP_SYS_RAWIO)) + return 0; + + /* Anybody who can open the device can do a read-safe command */ + if (test_bit(cmd[0], filter->read_ok)) + return 0; + + /* Write-safe commands require a writable open */ + if (test_bit(cmd[0], filter->write_ok) && has_write_perm) + return 0; + + return -EPERM; +} +EXPORT_SYMBOL(blk_verify_command); + +static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_hdr *hdr, fmode_t mode) +{ + if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; + if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) + return -EPERM; + + /* + * fill in request structure + */ + rq->cmd_len = hdr->cmd_len; + + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = q->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + return 0; +} + +static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, + struct bio *bio) +{ + int r, ret = 0; + + /* + * fill in all the output members + */ + hdr->status = rq->errors & 0xff; + hdr->masked_status = status_byte(rq->errors); + hdr->msg_status = msg_byte(rq->errors); + hdr->host_status = host_byte(rq->errors); + hdr->driver_status = driver_byte(rq->errors); + hdr->info = 0; + if (hdr->masked_status || hdr->host_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->resid = rq->resid_len; + hdr->sb_len_wr = 0; + + if (rq->sense_len && hdr->sbp) { + int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len); + + if (!copy_to_user(hdr->sbp, rq->sense, len)) + hdr->sb_len_wr = len; + else + ret = -EFAULT; + } + + r = blk_rq_unmap_user(bio); + if (!ret) + ret = r; + + return ret; +} + +static int sg_io(struct request_queue *q, struct gendisk *bd_disk, + struct sg_io_hdr *hdr, fmode_t mode) +{ + unsigned long start_time; + ssize_t ret = 0; + int writing = 0; + int at_head = 0; + struct request *rq; + char sense[SCSI_SENSE_BUFFERSIZE]; + struct bio *bio; + + if (hdr->interface_id != 'S') + return -EINVAL; + + if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) + return -EIO; + + if (hdr->dxfer_len) + switch (hdr->dxfer_direction) { + default: + return -EINVAL; + case SG_DXFER_TO_DEV: + writing = 1; + break; + case SG_DXFER_TO_FROM_DEV: + case SG_DXFER_FROM_DEV: + break; + } + if (hdr->flags & SG_FLAG_Q_AT_HEAD) + at_head = 1; + + ret = -ENOMEM; + rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); + if (IS_ERR(rq)) + return PTR_ERR(rq); + blk_rq_set_block_pc(rq); + + if (hdr->cmd_len > BLK_MAX_CDB) { + rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL); + if (!rq->cmd) + goto out_put_request; + } + + ret = -EFAULT; + if (blk_fill_sghdr_rq(q, rq, hdr, mode)) + goto out_free_cdb; + + ret = 0; + if (hdr->iovec_count) { + struct iov_iter i; + struct iovec *iov = NULL; + + ret = import_iovec(rq_data_dir(rq), + hdr->dxferp, hdr->iovec_count, + 0, &iov, &i); + if (ret < 0) + goto out_free_cdb; + + /* SG_IO howto says that the shorter of the two wins */ + iov_iter_truncate(&i, hdr->dxfer_len); + + ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL); + kfree(iov); + } else if (hdr->dxfer_len) + ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, + GFP_KERNEL); + + if (ret) + goto out_free_cdb; + + bio = rq->bio; + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + rq->retries = 0; + + start_time = jiffies; + + /* ignore return value. 
All information is passed back to caller + * (if he doesn't check that is his problem). + * N.B. a non-zero SCSI status is _not_ necessarily an error. + */ + blk_execute_rq(q, bd_disk, rq, at_head); + + hdr->duration = jiffies_to_msecs(jiffies - start_time); + + ret = blk_complete_sghdr_rq(rq, hdr, bio); + +out_free_cdb: + if (rq->cmd != rq->__cmd) + kfree(rq->cmd); +out_put_request: + blk_put_request(rq); + return ret; +} + +/** + * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl + * @file: file this ioctl operates on (optional) + * @q: request queue to send scsi commands down + * @disk: gendisk to operate on (option) + * @sic: userspace structure describing the command to perform + * + * Send down the scsi command described by @sic to the device below + * the request queue @q. If @file is non-NULL it's used to perform + * fine-grained permission checks that allow users to send down + * non-destructive SCSI commands. If the caller has a struct gendisk + * available it should be passed in as @disk to allow the low level + * driver to use the information contained in it. A non-NULL @disk + * is only allowed if the caller knows that the low level driver doesn't + * need it (e.g. in the scsi subsystem). + * + * Notes: + * - This interface is deprecated - users should use the SG_IO + * interface instead, as this is a more flexible approach to + * performing SCSI commands on a device. + * - The SCSI command length is determined by examining the 1st byte + * of the given command. There is no way to override this. + * - Data transfers are limited to PAGE_SIZE + * - The length (x + y) must be at least OMAX_SB_LEN bytes long to + * accommodate the sense buffer when an error occurs. + * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that + * old code will not be surprised. + * - If a Unix error occurs (e.g. ENOMEM) then the user will receive + * a negative return and the Unix error code in 'errno'. + * If the SCSI command succeeds then 0 is returned. + * Positive numbers returned are the compacted SCSI error codes (4 + * bytes in one int) where the lowest byte is the SCSI status. + */ +#define OMAX_SB_LEN 16 /* For backward compatibility */ +int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + struct scsi_ioctl_command __user *sic) +{ + struct request *rq; + int err; + unsigned int in_len, out_len, bytes, opcode, cmdlen; + char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; + + if (!sic) + return -EINVAL; + + /* + * get in an out lengths, verify they don't exceed a page worth of data + */ + if (get_user(in_len, &sic->inlen)) + return -EFAULT; + if (get_user(out_len, &sic->outlen)) + return -EFAULT; + if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) + return -EINVAL; + if (get_user(opcode, sic->data)) + return -EFAULT; + + bytes = max(in_len, out_len); + if (bytes) { + buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); + if (!buffer) + return -ENOMEM; + + } + + rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto error_free_buffer; + } + blk_rq_set_block_pc(rq); + + cmdlen = COMMAND_SIZE(opcode); + + /* + * get command and data to send to device, if any + */ + err = -EFAULT; + rq->cmd_len = cmdlen; + if (copy_from_user(rq->cmd, sic->data, cmdlen)) + goto error; + + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) + goto error; + + err = blk_verify_command(rq->cmd, mode & FMODE_WRITE); + if (err) + goto error; + + /* default. 
possible overriden later */ + rq->retries = 5; + + switch (opcode) { + case SEND_DIAGNOSTIC: + case FORMAT_UNIT: + rq->timeout = FORMAT_UNIT_TIMEOUT; + rq->retries = 1; + break; + case START_STOP: + rq->timeout = START_STOP_TIMEOUT; + break; + case MOVE_MEDIUM: + rq->timeout = MOVE_MEDIUM_TIMEOUT; + break; + case READ_ELEMENT_STATUS: + rq->timeout = READ_ELEMENT_STATUS_TIMEOUT; + break; + case READ_DEFECT_DATA: + rq->timeout = READ_DEFECT_DATA_TIMEOUT; + rq->retries = 1; + break; + default: + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + break; + } + + if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { + err = DRIVER_ERROR << 24; + goto error; + } + + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + + blk_execute_rq(q, disk, rq, 0); + + err = rq->errors & 0xff; /* only 8 bit SCSI status */ + if (err) { + if (rq->sense_len && rq->sense) { + bytes = (OMAX_SB_LEN > rq->sense_len) ? + rq->sense_len : OMAX_SB_LEN; + if (copy_to_user(sic->data, rq->sense, bytes)) + err = -EFAULT; + } + } else { + if (copy_to_user(sic->data, buffer, out_len)) + err = -EFAULT; + } + +error: + blk_put_request(rq); + +error_free_buffer: + kfree(buffer); + + return err; +} +EXPORT_SYMBOL_GPL(sg_scsi_ioctl); + +/* Send basic block requests */ +static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, + int cmd, int data) +{ + struct request *rq; + int err; + + rq = blk_get_request(q, WRITE, __GFP_WAIT); + if (IS_ERR(rq)) + return PTR_ERR(rq); + blk_rq_set_block_pc(rq); + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + rq->cmd[0] = cmd; + rq->cmd[4] = data; + rq->cmd_len = 6; + err = blk_execute_rq(q, bd_disk, rq, 0); + blk_put_request(rq); + + return err; +} + +static inline int blk_send_start_stop(struct request_queue *q, + struct gendisk *bd_disk, int data) +{ + return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); +} + +int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode, + unsigned int cmd, void __user *arg) +{ + int err; + + if (!q) + return -ENXIO; + + switch (cmd) { + /* + * new sgv3 interface + */ + case SG_GET_VERSION_NUM: + err = sg_get_version(arg); + break; + case SCSI_IOCTL_GET_IDLUN: + err = scsi_get_idlun(q, arg); + break; + case SCSI_IOCTL_GET_BUS_NUMBER: + err = scsi_get_bus(q, arg); + break; + case SG_SET_TIMEOUT: + err = sg_set_timeout(q, arg); + break; + case SG_GET_TIMEOUT: + err = sg_get_timeout(q); + break; + case SG_GET_RESERVED_SIZE: + err = sg_get_reserved_size(q, arg); + break; + case SG_SET_RESERVED_SIZE: + err = sg_set_reserved_size(q, arg); + break; + case SG_EMULATED_HOST: + err = sg_emulated_host(q, arg); + break; + case SG_IO: { + struct sg_io_hdr hdr; + + err = -EFAULT; + if (copy_from_user(&hdr, arg, sizeof(hdr))) + break; + err = sg_io(q, bd_disk, &hdr, mode); + if (err == -EFAULT) + break; + + if (copy_to_user(arg, &hdr, sizeof(hdr))) + err = -EFAULT; + break; + } + case CDROM_SEND_PACKET: { + struct cdrom_generic_command cgc; + struct sg_io_hdr hdr; + + err = -EFAULT; + if (copy_from_user(&cgc, arg, sizeof(cgc))) + break; + cgc.timeout = clock_t_to_jiffies(cgc.timeout); + memset(&hdr, 0, sizeof(hdr)); + hdr.interface_id = 'S'; + hdr.cmd_len = sizeof(cgc.cmd); + hdr.dxfer_len = cgc.buflen; + err = 0; + switch (cgc.data_direction) { + case CGC_DATA_UNKNOWN: + hdr.dxfer_direction = SG_DXFER_UNKNOWN; + break; + case CGC_DATA_WRITE: + hdr.dxfer_direction = SG_DXFER_TO_DEV; + break; + case CGC_DATA_READ: + hdr.dxfer_direction = SG_DXFER_FROM_DEV; + break; + case CGC_DATA_NONE: + 
hdr.dxfer_direction = SG_DXFER_NONE; + break; + default: + err = -EINVAL; + } + if (err) + break; + + hdr.dxferp = cgc.buffer; + hdr.sbp = cgc.sense; + if (hdr.sbp) + hdr.mx_sb_len = sizeof(struct request_sense); + hdr.timeout = jiffies_to_msecs(cgc.timeout); + hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd; + hdr.cmd_len = sizeof(cgc.cmd); + + err = sg_io(q, bd_disk, &hdr, mode); + if (err == -EFAULT) + break; + + if (hdr.status) + err = -EIO; + + cgc.stat = err; + cgc.buflen = hdr.resid; + if (copy_to_user(arg, &cgc, sizeof(cgc))) + err = -EFAULT; + + break; + } + + /* + * old junk scsi send command ioctl + */ + case SCSI_IOCTL_SEND_COMMAND: + printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm); + err = -EINVAL; + if (!arg) + break; + + err = sg_scsi_ioctl(q, bd_disk, mode, arg); + break; + case CDROMCLOSETRAY: + err = blk_send_start_stop(q, bd_disk, 0x03); + break; + case CDROMEJECT: + err = blk_send_start_stop(q, bd_disk, 0x02); + break; + default: + err = -ENOTTY; + } + + return err; +} +EXPORT_SYMBOL(scsi_cmd_ioctl); + +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd) +{ + if (bd && bd == bd->bd_contains) + return 0; + + /* Actually none of these is particularly useful on a partition, + * but they are safe. + */ + switch (cmd) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SCSI_IOCTL_GET_PCI: + case SCSI_IOCTL_PROBE_HOST: + case SG_GET_VERSION_NUM: + case SG_SET_TIMEOUT: + case SG_GET_TIMEOUT: + case SG_GET_RESERVED_SIZE: + case SG_SET_RESERVED_SIZE: + case SG_EMULATED_HOST: + return 0; + case CDROM_GET_CAPABILITY: + /* Keep this until we remove the printk below. udev sends it + * and we do not want to spam dmesg about it. CD-ROMs do + * not have partitions, so we get here only for disks. + */ + return -ENOIOCTLCMD; + default: + break; + } + + if (capable(CAP_SYS_RAWIO)) + return 0; + + /* In particular, rule out all resets and host-specific ioctls. */ + printk_ratelimited(KERN_WARNING + "%s: sending ioctl %x to a partition!\n", current->comm, cmd); + + return -ENOIOCTLCMD; +} +EXPORT_SYMBOL(scsi_verify_blk_ioctl); + +int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode, + unsigned int cmd, void __user *arg) +{ + int ret; + + ret = scsi_verify_blk_ioctl(bd, cmd); + if (ret < 0) + return ret; + + return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg); +} +EXPORT_SYMBOL(scsi_cmd_blk_ioctl); + +static int __init blk_scsi_ioctl_init(void) +{ + blk_set_cmd_filter_defaults(&blk_default_cmd_filter); + return 0; +} +fs_initcall(blk_scsi_ioctl_init); diff --git a/block/t10-pi.c b/block/t10-pi.c new file mode 100644 index 000000000..24d6e9715 --- /dev/null +++ b/block/t10-pi.c @@ -0,0 +1,197 @@ +/* + * t10_pi.c - Functions for generating and verifying T10 Protection + * Information. + * + * Copyright (C) 2007, 2008, 2014 Oracle Corporation + * Written by: Martin K. Petersen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. 
If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#include +#include +#include +#include + +typedef __be16 (csum_fn) (void *, unsigned int); + +static const __be16 APP_ESCAPE = (__force __be16) 0xffff; +static const __be32 REF_ESCAPE = (__force __be32) 0xffffffff; + +static __be16 t10_pi_crc_fn(void *data, unsigned int len) +{ + return cpu_to_be16(crc_t10dif(data, len)); +} + +static __be16 t10_pi_ip_fn(void *data, unsigned int len) +{ + return (__force __be16)ip_compute_csum(data, len); +} + +/* + * Type 1 and Type 2 protection use the same format: 16 bit guard tag, + * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref + * tag. + */ +static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn, + unsigned int type) +{ + unsigned int i; + + for (i = 0 ; i < iter->data_size ; i += iter->interval) { + struct t10_pi_tuple *pi = iter->prot_buf; + + pi->guard_tag = fn(iter->data_buf, iter->interval); + pi->app_tag = 0; + + if (type == 1) + pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed)); + else + pi->ref_tag = 0; + + iter->data_buf += iter->interval; + iter->prot_buf += sizeof(struct t10_pi_tuple); + iter->seed++; + } + + return 0; +} + +static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn, + unsigned int type) +{ + unsigned int i; + + for (i = 0 ; i < iter->data_size ; i += iter->interval) { + struct t10_pi_tuple *pi = iter->prot_buf; + __be16 csum; + + switch (type) { + case 1: + case 2: + if (pi->app_tag == APP_ESCAPE) + goto next; + + if (be32_to_cpu(pi->ref_tag) != + lower_32_bits(iter->seed)) { + pr_err("%s: ref tag error at location %llu " \ + "(rcvd %u)\n", iter->disk_name, + (unsigned long long) + iter->seed, be32_to_cpu(pi->ref_tag)); + return -EILSEQ; + } + break; + case 3: + if (pi->app_tag == APP_ESCAPE && + pi->ref_tag == REF_ESCAPE) + goto next; + break; + } + + csum = fn(iter->data_buf, iter->interval); + + if (pi->guard_tag != csum) { + pr_err("%s: guard tag error at sector %llu " \ + "(rcvd %04x, want %04x)\n", iter->disk_name, + (unsigned long long)iter->seed, + be16_to_cpu(pi->guard_tag), be16_to_cpu(csum)); + return -EILSEQ; + } + +next: + iter->data_buf += iter->interval; + iter->prot_buf += sizeof(struct t10_pi_tuple); + iter->seed++; + } + + return 0; +} + +static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) +{ + return t10_pi_generate(iter, t10_pi_crc_fn, 1); +} + +static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) +{ + return t10_pi_generate(iter, t10_pi_ip_fn, 1); +} + +static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) +{ + return t10_pi_verify(iter, t10_pi_crc_fn, 1); +} + +static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) +{ + return t10_pi_verify(iter, t10_pi_ip_fn, 1); +} + +static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) +{ + return t10_pi_generate(iter, t10_pi_crc_fn, 3); +} + +static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) +{ + return t10_pi_generate(iter, t10_pi_ip_fn, 3); +} + +static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) +{ + return t10_pi_verify(iter, t10_pi_crc_fn, 3); +} + +static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) +{ + return t10_pi_verify(iter, t10_pi_ip_fn, 3); +} + +struct blk_integrity t10_pi_type1_crc = { + .name = "T10-DIF-TYPE1-CRC", + .generate_fn = t10_pi_type1_generate_crc, + .verify_fn = t10_pi_type1_verify_crc, + .tuple_size = sizeof(struct t10_pi_tuple), + .tag_size = 0, +}; 
+EXPORT_SYMBOL(t10_pi_type1_crc); + +struct blk_integrity t10_pi_type1_ip = { + .name = "T10-DIF-TYPE1-IP", + .generate_fn = t10_pi_type1_generate_ip, + .verify_fn = t10_pi_type1_verify_ip, + .tuple_size = sizeof(struct t10_pi_tuple), + .tag_size = 0, +}; +EXPORT_SYMBOL(t10_pi_type1_ip); + +struct blk_integrity t10_pi_type3_crc = { + .name = "T10-DIF-TYPE3-CRC", + .generate_fn = t10_pi_type3_generate_crc, + .verify_fn = t10_pi_type3_verify_crc, + .tuple_size = sizeof(struct t10_pi_tuple), + .tag_size = 0, +}; +EXPORT_SYMBOL(t10_pi_type3_crc); + +struct blk_integrity t10_pi_type3_ip = { + .name = "T10-DIF-TYPE3-IP", + .generate_fn = t10_pi_type3_generate_ip, + .verify_fn = t10_pi_type3_verify_ip, + .tuple_size = sizeof(struct t10_pi_tuple), + .tag_size = 0, +}; +EXPORT_SYMBOL(t10_pi_type3_ip); diff --git a/block/uuid.c b/block/uuid.c new file mode 100644 index 000000000..722d53b63 --- /dev/null +++ b/block/uuid.c @@ -0,0 +1,509 @@ +#include +#include +#include +#include +#include + +static int debug_enabled; + +#define PRINTK(fmt, args...) do { \ + if (debug_enabled) \ + printk(KERN_DEBUG fmt, ## args); \ + } while(0) + +#define PRINT_HEX_DUMP(v1, v2, v3, v4, v5, v6, v7, v8) \ + do { \ + if (debug_enabled) \ + print_hex_dump(v1, v2, v3, v4, v5, v6, v7, v8); \ + } while(0) + +/* + * Simple UUID translation + */ + +struct uuid_info { + const char *key; + const char *name; + long bkoff; + unsigned sboff; + unsigned sig_len; + const char *magic; + int uuid_offset; + int last_mount_offset; + int last_mount_size; +}; + +/* + * Based on libuuid's blkid_magic array. Note that I don't + * have uuid offsets for all of these yet - mssing ones are 0x0. + * Further information welcome. + * + * Rearranged by page of fs signature for optimisation. + */ +static struct uuid_info uuid_list[] = { + { NULL, "oracleasm", 0, 32, 8, "ORCLDISK", 0x0, 0, 0 }, + { "ntfs", "ntfs", 0, 3, 8, "NTFS ", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0x52, 5, "MSWIN", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0x52, 8, "FAT32 ", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0x36, 5, "MSDOS", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0x36, 8, "FAT16 ", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0x36, 8, "FAT12 ", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0, 1, "\353", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0, 1, "\351", 0x0, 0, 0 }, + { "vfat", "vfat", 0, 0x1fe, 2, "\125\252", 0x0, 0, 0 }, + { "xfs", "xfs", 0, 0, 4, "XFSB", 0x20, 0, 0 }, + { "romfs", "romfs", 0, 0, 8, "-rom1fs-", 0x0, 0, 0 }, + { "bfs", "bfs", 0, 0, 4, "\316\372\173\033", 0, 0, 0 }, + { "cramfs", "cramfs", 0, 0, 4, "E=\315\050", 0x0, 0, 0 }, + { "qnx4", "qnx4", 0, 4, 6, "QNX4FS", 0, 0, 0 }, + { NULL, "crypt_LUKS", 0, 0, 6, "LUKS\xba\xbe", 0x0, 0, 0 }, + { "squashfs", "squashfs", 0, 0, 4, "sqsh", 0, 0, 0 }, + { "squashfs", "squashfs", 0, 0, 4, "hsqs", 0, 0, 0 }, + { "ocfs", "ocfs", 0, 8, 9, "OracleCFS", 0x0, 0, 0 }, + { "lvm2pv", "lvm2pv", 0, 0x018, 8, "LVM2 001", 0x0, 0, 0 }, + { "sysv", "sysv", 0, 0x3f8, 4, "\020~\030\375", 0, 0, 0 }, + { "ext", "ext", 1, 0x38, 2, "\123\357", 0x468, 0x42c, 4 }, + { "minix", "minix", 1, 0x10, 2, "\177\023", 0, 0, 0 }, + { "minix", "minix", 1, 0x10, 2, "\217\023", 0, 0, 0 }, + { "minix", "minix", 1, 0x10, 2, "\150\044", 0, 0, 0 }, + { "minix", "minix", 1, 0x10, 2, "\170\044", 0, 0, 0 }, + { "lvm2pv", "lvm2pv", 1, 0x018, 8, "LVM2 001", 0x0, 0, 0 }, + { "vxfs", "vxfs", 1, 0, 4, "\365\374\001\245", 0, 0, 0 }, + { "hfsplus", "hfsplus", 1, 0, 2, "BD", 0x0, 0, 0 }, + { "hfsplus", "hfsplus", 1, 0, 2, "H+", 0x0, 0, 0 }, + { "hfsplus", "hfsplus", 1, 0, 2, "HX", 0x0, 0, 0 }, + { 
"hfs", "hfs", 1, 0, 2, "BD", 0x0, 0, 0 }, + { "ocfs2", "ocfs2", 1, 0, 6, "OCFSV2", 0x0, 0, 0 }, + { "lvm2pv", "lvm2pv", 0, 0x218, 8, "LVM2 001", 0x0, 0, 0 }, + { "lvm2pv", "lvm2pv", 1, 0x218, 8, "LVM2 001", 0x0, 0, 0 }, + { "ocfs2", "ocfs2", 2, 0, 6, "OCFSV2", 0x0, 0, 0 }, + { "swap", "swap", 0, 0xff6, 10, "SWAP-SPACE", 0x40c, 0, 0 }, + { "swap", "swap", 0, 0xff6, 10, "SWAPSPACE2", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0xff6, 9, "S1SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0xff6, 9, "S2SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0xff6, 9, "ULSUSPEND", 0x40c, 0, 0 }, + { "ocfs2", "ocfs2", 4, 0, 6, "OCFSV2", 0x0, 0, 0 }, + { "ocfs2", "ocfs2", 8, 0, 6, "OCFSV2", 0x0, 0, 0 }, + { "hpfs", "hpfs", 8, 0, 4, "I\350\225\371", 0, 0, 0 }, + { "reiserfs", "reiserfs", 8, 0x34, 8, "ReIsErFs", 0x10054, 0, 0 }, + { "reiserfs", "reiserfs", 8, 20, 8, "ReIsErFs", 0x10054, 0, 0 }, + { "zfs", "zfs", 8, 0, 8, "\0\0\x02\xf5\xb0\x07\xb1\x0c", 0x0, 0, 0 }, + { "zfs", "zfs", 8, 0, 8, "\x0c\xb1\x07\xb0\xf5\x02\0\0", 0x0, 0, 0 }, + { "ufs", "ufs", 8, 0x55c, 4, "T\031\001\000", 0, 0, 0 }, + { "swap", "swap", 0, 0x1ff6, 10, "SWAP-SPACE", 0x40c, 0, 0 }, + { "swap", "swap", 0, 0x1ff6, 10, "SWAPSPACE2", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x1ff6, 9, "S1SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x1ff6, 9, "S2SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x1ff6, 9, "ULSUSPEND", 0x40c, 0, 0 }, + { "reiserfs", "reiserfs", 64, 0x34, 9, "ReIsEr2Fs", 0x10054, 0, 0 }, + { "reiserfs", "reiserfs", 64, 0x34, 9, "ReIsEr3Fs", 0x10054, 0, 0 }, + { "reiserfs", "reiserfs", 64, 0x34, 8, "ReIsErFs", 0x10054, 0, 0 }, + { "reiser4", "reiser4", 64, 0, 7, "ReIsEr4", 0x100544, 0, 0 }, + { "gfs2", "gfs2", 64, 0, 4, "\x01\x16\x19\x70", 0x0, 0, 0 }, + { "gfs", "gfs", 64, 0, 4, "\x01\x16\x19\x70", 0x0, 0, 0 }, + { "btrfs", "btrfs", 64, 0x40, 8, "_BHRfS_M", 0x0, 0, 0 }, + { "swap", "swap", 0, 0x3ff6, 10, "SWAP-SPACE", 0x40c, 0, 0 }, + { "swap", "swap", 0, 0x3ff6, 10, "SWAPSPACE2", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x3ff6, 9, "S1SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x3ff6, 9, "S2SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x3ff6, 9, "ULSUSPEND", 0x40c, 0, 0 }, + { "udf", "udf", 32, 1, 5, "BEA01", 0x0, 0, 0 }, + { "udf", "udf", 32, 1, 5, "BOOT2", 0x0, 0, 0 }, + { "udf", "udf", 32, 1, 5, "CD001", 0x0, 0, 0 }, + { "udf", "udf", 32, 1, 5, "CDW02", 0x0, 0, 0 }, + { "udf", "udf", 32, 1, 5, "NSR02", 0x0, 0, 0 }, + { "udf", "udf", 32, 1, 5, "NSR03", 0x0, 0, 0 }, + { "udf", "udf", 32, 1, 5, "TEA01", 0x0, 0, 0 }, + { "iso9660", "iso9660", 32, 1, 5, "CD001", 0x0, 0, 0 }, + { "iso9660", "iso9660", 32, 9, 5, "CDROM", 0x0, 0, 0 }, + { "jfs", "jfs", 32, 0, 4, "JFS1", 0x88, 0, 0 }, + { "swap", "swap", 0, 0x7ff6, 10, "SWAP-SPACE", 0x40c, 0, 0 }, + { "swap", "swap", 0, 0x7ff6, 10, "SWAPSPACE2", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x7ff6, 9, "S1SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x7ff6, 9, "S2SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0x7ff6, 9, "ULSUSPEND", 0x40c, 0, 0 }, + { "swap", "swap", 0, 0xfff6, 10, "SWAP-SPACE", 0x40c, 0, 0 }, + { "swap", "swap", 0, 0xfff6, 10, "SWAPSPACE2", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0xfff6, 9, "S1SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0xfff6, 9, "S2SUSPEND", 0x40c, 0, 0 }, + { "swap", "swsuspend", 0, 0xfff6, 9, "ULSUSPEND", 0x40c, 0, 0 }, + { "zfs", "zfs", 264, 0, 8, "\0\0\x02\xf5\xb0\x07\xb1\x0c", 0x0, 0, 0 }, + { "zfs", "zfs", 264, 0, 8, "\x0c\xb1\x07\xb0\xf5\x02\0\0", 0x0, 0, 0 }, + { NULL, 
NULL, 0, 0, 0, NULL, 0x0, 0, 0 } +}; + +static int null_uuid(const char *uuid) +{ + int i; + + for (i = 0; i < 16 && !uuid[i]; i++); + + return (i == 16); +} + + +static void uuid_end_bio(struct bio *bio, int err) +{ + struct page *page = bio->bi_io_vec[0].bv_page; + + if(!test_bit(BIO_UPTODATE, &bio->bi_flags)) + SetPageError(page); + + unlock_page(page); + bio_put(bio); +} + + +/** + * submit - submit BIO request + * @dev: The block device we're using. + * @page_num: The page we're reading. + * + * Based on Patrick Mochell's pmdisk code from long ago: "Straight from the + * textbook - allocate and initialize the bio. If we're writing, make sure + * the page is marked as dirty. Then submit it and carry on." + **/ +static struct page *read_bdev_page(struct block_device *dev, int page_num) +{ + struct bio *bio = NULL; + struct page *page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + + if (!page) { + printk(KERN_ERR "Failed to allocate a page for reading data " + "in UUID checks."); + return NULL; + } + + bio = bio_alloc(GFP_NOFS, 1); + bio->bi_bdev = dev; + bio->bi_iter.bi_sector = page_num << 3; + bio->bi_end_io = uuid_end_bio; + bio->bi_flags |= (1 << BIO_TOI); + + PRINTK("Submitting bio on device %lx, page %d using bio %p and page %p.\n", + (unsigned long) dev->bd_dev, page_num, bio, page); + + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { + printk(KERN_DEBUG "ERROR: adding page to bio at %d\n", + page_num); + bio_put(bio); + __free_page(page); + printk(KERN_DEBUG "read_bdev_page freed page %p (in error " + "path).\n", page); + return NULL; + } + + lock_page(page); + submit_bio(READ | REQ_SYNC, bio); + + wait_on_page_locked(page); + if (PageError(page)) { + __free_page(page); + page = NULL; + } + return page; +} + +int bdev_matches_key(struct block_device *bdev, const char *key) +{ + unsigned char *data = NULL; + struct page *data_page = NULL; + + int dev_offset, pg_num, pg_off, i; + int last_pg_num = -1; + int result = 0; + char buf[50]; + + if (null_uuid(key)) { + PRINTK("Refusing to find a NULL key.\n"); + return 0; + } + + if (!bdev->bd_disk) { + bdevname(bdev, buf); + PRINTK("bdev %s has no bd_disk.\n", buf); + return 0; + } + + if (!bdev->bd_disk->queue) { + bdevname(bdev, buf); + PRINTK("bdev %s has no queue.\n", buf); + return 0; + } + + for (i = 0; uuid_list[i].name; i++) { + struct uuid_info *dat = &uuid_list[i]; + + if (!dat->key || strcmp(dat->key, key)) + continue; + + dev_offset = (dat->bkoff << 10) + dat->sboff; + pg_num = dev_offset >> 12; + pg_off = dev_offset & 0xfff; + + if ((((pg_num + 1) << 3) - 1) > bdev->bd_part->nr_sects >> 1) + continue; + + if (pg_num != last_pg_num) { + if (data_page) { + kunmap(data_page); + __free_page(data_page); + } + data_page = read_bdev_page(bdev, pg_num); + if (!data_page) + continue; + data = kmap(data_page); + } + + last_pg_num = pg_num; + + if (strncmp(&data[pg_off], dat->magic, dat->sig_len)) + continue; + + result = 1; + break; + } + + if (data_page) { + kunmap(data_page); + __free_page(data_page); + } + + return result; +} + +/* + * part_matches_fs_info - Does the given partition match the details given? + * + * Returns a score saying how good the match is. + * 0 = no UUID match. + * 1 = UUID but last mount time differs. 
+ * 2 = UUID, last mount time but not dev_t + * 3 = perfect match + * + * This lets us cope elegantly with probing resulting in dev_ts changing + * from boot to boot, and with the case where a user copies a partition + * (UUID is non unique), and we need to check the last mount time of the + * correct partition. + */ +int part_matches_fs_info(struct hd_struct *part, struct fs_info *seek) +{ + struct block_device *bdev; + struct fs_info *got; + int result = 0; + char buf[50]; + + if (null_uuid((char *) &seek->uuid)) { + PRINTK("Refusing to find a NULL uuid.\n"); + return 0; + } + + bdev = bdget(part_devt(part)); + + PRINTK("part_matches fs info considering %x.\n", part_devt(part)); + + if (blkdev_get(bdev, FMODE_READ, 0)) { + PRINTK("blkdev_get failed.\n"); + return 0; + } + + if (!bdev->bd_disk) { + bdevname(bdev, buf); + PRINTK("bdev %s has no bd_disk.\n", buf); + goto out; + } + + if (!bdev->bd_disk->queue) { + bdevname(bdev, buf); + PRINTK("bdev %s has no queue.\n", buf); + goto out; + } + + got = fs_info_from_block_dev(bdev); + + if (got && !memcmp(got->uuid, seek->uuid, 16)) { + PRINTK(" Have matching UUID.\n"); + PRINTK(" Got: LMS %d, LM %p.\n", got->last_mount_size, got->last_mount); + PRINTK(" Seek: LMS %d, LM %p.\n", seek->last_mount_size, seek->last_mount); + result = 1; + + if (got->last_mount_size == seek->last_mount_size && + got->last_mount && seek->last_mount && + !memcmp(got->last_mount, seek->last_mount, + got->last_mount_size)) { + result = 2; + + PRINTK(" Matching last mount time.\n"); + + if (part_devt(part) == seek->dev_t) { + result = 3; + PRINTK(" Matching dev_t.\n"); + } else + PRINTK("Dev_ts differ (%x vs %x).\n", part_devt(part), seek->dev_t); + } + } + + PRINTK(" Score for %x is %d.\n", part_devt(part), result); + free_fs_info(got); +out: + blkdev_put(bdev, FMODE_READ); + return result; +} + +void free_fs_info(struct fs_info *fs_info) +{ + if (!fs_info || IS_ERR(fs_info)) + return; + + if (fs_info->last_mount) + kfree(fs_info->last_mount); + + kfree(fs_info); +} + +struct fs_info *fs_info_from_block_dev(struct block_device *bdev) +{ + unsigned char *data = NULL; + struct page *data_page = NULL; + + int dev_offset, pg_num, pg_off; + int uuid_pg_num, uuid_pg_off, i; + unsigned char *uuid_data = NULL; + struct page *uuid_data_page = NULL; + + int last_pg_num = -1, last_uuid_pg_num = 0; + char buf[50]; + struct fs_info *fs_info = NULL; + + bdevname(bdev, buf); + + PRINTK("uuid_from_block_dev looking for partition type of %s.\n", buf); + + for (i = 0; uuid_list[i].name; i++) { + struct uuid_info *dat = &uuid_list[i]; + dev_offset = (dat->bkoff << 10) + dat->sboff; + pg_num = dev_offset >> 12; + pg_off = dev_offset & 0xfff; + uuid_pg_num = dat->uuid_offset >> 12; + uuid_pg_off = dat->uuid_offset & 0xfff; + + if ((((pg_num + 1) << 3) - 1) > bdev->bd_part->nr_sects >> 1) + continue; + + /* Ignore partition types with no UUID offset */ + if (!dat->uuid_offset) + continue; + + if (pg_num != last_pg_num) { + if (data_page) { + kunmap(data_page); + __free_page(data_page); + } + data_page = read_bdev_page(bdev, pg_num); + if (!data_page) + continue; + data = kmap(data_page); + } + + last_pg_num = pg_num; + + if (strncmp(&data[pg_off], dat->magic, dat->sig_len)) + continue; + + PRINTK("This partition looks like %s.\n", dat->name); + + fs_info = kzalloc(sizeof(struct fs_info), GFP_KERNEL); + + if (!fs_info) { + PRINTK("Failed to allocate fs_info struct."); + fs_info = ERR_PTR(-ENOMEM); + break; + } + + /* UUID can't be off the end of the disk */ + if ((uuid_pg_num > 
bdev->bd_part->nr_sects >> 3) || + !dat->uuid_offset) + goto no_uuid; + + if (!uuid_data || uuid_pg_num != last_uuid_pg_num) { + /* No need to reread the page from above */ + if (uuid_pg_num == pg_num && uuid_data) + memcpy(uuid_data, data, PAGE_SIZE); + else { + if (uuid_data_page) { + kunmap(uuid_data_page); + __free_page(uuid_data_page); + } + uuid_data_page = read_bdev_page(bdev, uuid_pg_num); + if (!uuid_data_page) + continue; + uuid_data = kmap(uuid_data_page); + } + } + + last_uuid_pg_num = uuid_pg_num; + memcpy(&fs_info->uuid, &uuid_data[uuid_pg_off], 16); + fs_info->dev_t = bdev->bd_dev; + +no_uuid: + PRINT_HEX_DUMP(KERN_EMERG, "fs_info_from_block_dev " + "returning uuid ", DUMP_PREFIX_NONE, 16, 1, + fs_info->uuid, 16, 0); + + if (dat->last_mount_size) { + int pg = dat->last_mount_offset >> 12, sz; + int off = dat->last_mount_offset & 0xfff; + struct page *last_mount = read_bdev_page(bdev, pg); + unsigned char *last_mount_data; + char *ptr; + + if (!last_mount) { + fs_info = ERR_PTR(-ENOMEM); + break; + } + last_mount_data = kmap(last_mount); + sz = dat->last_mount_size; + ptr = kmalloc(sz, GFP_KERNEL); + + if (!ptr) { + printk(KERN_EMERG "fs_info_from_block_dev " + "failed to get memory for last mount " + "timestamp."); + free_fs_info(fs_info); + fs_info = ERR_PTR(-ENOMEM); + } else { + fs_info->last_mount = ptr; + fs_info->last_mount_size = sz; + memcpy(ptr, &last_mount_data[off], sz); + } + + kunmap(last_mount); + __free_page(last_mount); + } + break; + } + + if (data_page) { + kunmap(data_page); + __free_page(data_page); + } + + if (uuid_data_page) { + kunmap(uuid_data_page); + __free_page(uuid_data_page); + } + + return fs_info; +} + +static int __init uuid_debug_setup(char *str) +{ + int value; + + if (sscanf(str, "=%d", &value)) + debug_enabled = value; + + return 1; +} + +__setup("uuid_debug", uuid_debug_setup); -- cgit v1.2.3
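
The sg_io()/scsi_cmd_ioctl() path imported in block/scsi_ioctl.c above is normally driven from userspace through the SG_IO ioctl. What follows is a minimal illustrative sketch of such a caller, not part of the patch itself: the device node /dev/sda and the 96-byte INQUIRY allocation length are assumptions chosen for the example, and error handling is kept to the bare minimum. INQUIRY is on the read_ok list built by blk_set_cmd_filter_defaults(), so a read-only open is sufficient; write-class CDBs would need a writable open or CAP_SYS_RAWIO, exactly as blk_verify_command() enforces.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	/* 6-byte INQUIRY CDB: opcode 0x12, 96-byte allocation length */
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sda", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id    = 'S';		/* mandatory, checked by sg_io() */
	hdr.cmd_len         = sizeof(cdb);
	hdr.cmdp            = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;	/* device -> host transfer */
	hdr.dxfer_len       = sizeof(buf);
	hdr.dxferp          = buf;
	hdr.sbp             = sense;
	hdr.mx_sb_len       = sizeof(sense);
	hdr.timeout         = 5000;		/* milliseconds; see blk_fill_sghdr_rq() */

	if (ioctl(fd, SG_IO, &hdr) < 0) {	/* EPERM, EINVAL, ... */
		close(fd);
		return 1;
	}

	/* status/host_status/driver_status are filled in by blk_complete_sghdr_rq() */
	if (hdr.status == 0 && hdr.host_status == 0 && hdr.driver_status == 0)
		printf("vendor: %.8s  product: %.16s\n",
		       (char *)buf + 8, (char *)buf + 16);

	close(fd);
	return 0;
}

On completion the kernel also reports hdr.resid and, when SG_INFO_CHECK is set in hdr.info, copies up to mx_sb_len bytes of sense data back to sbp, so a caller can distinguish transport failures from SCSI check conditions without any extra ioctls.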