author	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-01-20 14:01:31 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-01-20 14:01:31 -0300
commit	b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch)
tree	82fcb00e6b918026dc9f2d1f05ed8eee83874cc0	/include/linux/blkdev.h
parent	35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff)
Linux-libre 4.4-gnu (pck-4.4-gnu)
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	65
1 file changed, 34 insertions(+), 31 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 19c2e947d..ae43492ce 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -35,6 +35,7 @@ struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct pr_ops;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -43,7 +44,7 @@ struct blk_flush_queue;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 2
+#define BLKCG_MAX_POLS 3
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
@@ -208,7 +209,7 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
typedef void (request_fn_proc) (struct request_queue *q);
-typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
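[Note: with make_request_fn now returning a blk_qc_t cookie instead of void, a driver that supplies its own bio handler only needs to hand a cookie back. A minimal sketch for a hypothetical driver; the mydrv_* names are illustrative and not part of this patch:]

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/* Hypothetical bio-based driver: the handler now returns a blk_qc_t. */
	static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
	{
		/* ... queue or complete the bio here ... */
		bio_endio(bio);

		/* Nothing pollable to hand back from this simple driver. */
		return BLK_QC_T_NONE;
	}

	/* Registration is unchanged: blk_queue_make_request(q, mydrv_make_request); */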
@@ -253,6 +254,7 @@ struct queue_limits {
unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
@@ -369,6 +371,10 @@ struct request_queue {
*/
struct kobject mq_kobj;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity integrity;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#ifdef CONFIG_PM
struct device *dev;
int rpm_status;
@@ -450,7 +456,7 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
- struct percpu_ref mq_usage_counter;
+ struct percpu_ref q_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
@@ -482,6 +488,7 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -756,7 +763,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern void generic_make_request(struct bio *bio);
+extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
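[Note: since generic_make_request() now returns the cookie as well, a stacking driver that remaps and resubmits a bio can simply propagate it. A hedged sketch with hypothetical names:]

	struct mydrv {				/* hypothetical per-device state */
		struct block_device *lower_bdev;
		sector_t start_sector;
	};

	static blk_qc_t mydrv_remap_make_request(struct request_queue *q,
						 struct bio *bio)
	{
		struct mydrv *dev = q->queuedata;

		bio->bi_bdev = dev->lower_bdev;		/* redirect to the lower device */
		bio->bi_iter.bi_sector += dev->start_sector;

		/* Pass the lower queue's cookie up so callers can poll on it. */
		return generic_make_request(bio);
	}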
@@ -767,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -788,7 +794,10 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
+extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
@@ -809,6 +818,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue; /* this is never NULL */
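[Note: blk_poll() consumes the blk_qc_t cookie handed out at submission time and spins on the matching hardware queue; it only does anything when QUEUE_FLAG_POLL is set and the driver supports polling. A hedged sketch of the intended pattern; mydrv_* and the *done flag are illustrative, and the bio's end_io is assumed to set *done:]

	#include <linux/blkdev.h>
	#include <linux/fs.h>
	#include <linux/sched.h>

	static void mydrv_submit_and_poll(struct block_device *bdev,
					  struct bio *bio, bool *done)
	{
		struct request_queue *q = bdev_get_queue(bdev);
		blk_qc_t cookie;

		cookie = submit_bio(READ, bio);	/* 4.4: submit_bio() returns the cookie */

		while (!READ_ONCE(*done)) {
			if (!blk_poll(q, cookie))
				cpu_relax();	/* polling unavailable or idle */
		}
	}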
@@ -950,7 +961,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
@@ -1462,22 +1472,13 @@ struct blk_integrity_iter {
typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
-struct blk_integrity {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
-
- unsigned short flags;
- unsigned short tuple_size;
- unsigned short interval;
- unsigned short tag_size;
-
- const char *name;
-
- struct kobject kobj;
+struct blk_integrity_profile {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+ const char *name;
};
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@ -1488,15 +1489,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
struct bio *);
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
- return bdev->bd_disk->integrity;
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return NULL;
+
+ return bi;
}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
- return disk->integrity;
+ return blk_get_integrity(bdev->bd_disk);
}
static inline bool blk_integrity_rq(struct request *rq)
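[Note: with the integrity state embedded in the request_queue and described by a blk_integrity_profile, registration takes a template and returns void. A hedged sketch of how a hypothetical driver might register its own profile; callback bodies and names are illustrative:]

	static int mydrv_pi_generate(struct blk_integrity_iter *iter)
	{
		/* ... generate protection information for the data in @iter ... */
		return 0;
	}

	static int mydrv_pi_verify(struct blk_integrity_iter *iter)
	{
		/* ... verify protection information for the data in @iter ... */
		return 0;
	}

	static struct blk_integrity_profile mydrv_pi_profile = {
		.name		= "MYDRV-PI",
		.generate_fn	= mydrv_pi_generate,
		.verify_fn	= mydrv_pi_verify,
	};

	static void mydrv_register_integrity(struct gendisk *disk)
	{
		struct blk_integrity bi = {
			.profile = &mydrv_pi_profile,
			/* tuple_size, tag_size, etc. as required by the format */
		};

		blk_integrity_register(disk, &bi);	/* now returns void */
	}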
@@ -1570,10 +1576,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
return 0;
}
-static inline int blk_integrity_register(struct gendisk *d,
+static inline void blk_integrity_register(struct gendisk *d,
struct blk_integrity *b)
{
- return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
@@ -1598,10 +1603,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
{
return true;
}
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
-{
- return 0;
-}
+
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
@@ -1633,6 +1635,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
struct module *owner;
+ const struct pr_ops *pr_ops;
};
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
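[Note: the new pr_ops pointer lets a driver expose persistent-reservation operations through its block_device_operations. A minimal sketch with hypothetical mydrv_* handlers; only pr_register is shown, the remaining callbacks from linux/pr.h follow the same pattern:]

	#include <linux/module.h>
	#include <linux/blkdev.h>
	#include <linux/pr.h>

	/* Hypothetical handler: register/replace our reservation key on the device. */
	static int mydrv_pr_register(struct block_device *bdev, u64 old_key,
				     u64 new_key, u32 flags)
	{
		/* ... issue the device-specific PERSISTENT RESERVE OUT equivalent ... */
		return 0;
	}

	static const struct pr_ops mydrv_pr_ops = {
		.pr_register	= mydrv_pr_register,
		/* .pr_reserve, .pr_release, .pr_preempt, .pr_clear as needed */
	};

	static const struct block_device_operations mydrv_fops = {
		.owner	= THIS_MODULE,
		.pr_ops	= &mydrv_pr_ops,
	};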