author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree       d6d89a12e7eb8017837c057935a2271290907f76 /block/blk-settings.c
parent     8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--  block/blk-settings.c  |  59
1 file changed, 41 insertions(+), 18 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0..746dc9fee 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -820,31 +820,54 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
 /**
- * blk_queue_flush - configure queue's cache flush capability
+ * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q: the request queue for the device
- * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ * @depth: queue depth
  *
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
  */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 {
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-		flush &= ~REQ_FUA;
-
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+	q->queue_depth = depth;
+	wbt_set_queue_depth(q->rq_wb, depth);
 }
-EXPORT_SYMBOL_GPL(blk_queue_flush);
+EXPORT_SYMBOL(blk_set_queue_depth);
 
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
-	q->flush_not_queueable = !queueable;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q: the request queue for the device
+ * @wc: write back cache on or off
+ * @fua: device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua)
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+
+	wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
 
 static int __init blk_settings_init(void)
 {
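
For context, a sketch of how a block driver might call the interfaces added or reworked by this commit. Only the three blk_* calls and their signatures come from the diff above; the function name "mydrv_setup_queue" and its parameters are illustrative assumptions, not part of any real driver.

#include <linux/blkdev.h>

/*
 * Hypothetical example (not part of the commit): a driver advertising
 * its hardware queue depth and cache characteristics to the block layer
 * during queue setup.
 */
static void mydrv_setup_queue(struct request_queue *q,
			      unsigned int hw_depth,
			      bool write_cache, bool fua)
{
	/* Let the block layer track the actual device queue depth. */
	blk_set_queue_depth(q, hw_depth);

	/*
	 * Advertise a volatile write back cache and FUA support so the
	 * flush machinery issues preflushes/FUA writes as required.
	 */
	blk_queue_write_cache(q, write_cache, fua);

	/* Flush requests on this device may be queued with other I/O. */
	blk_queue_flush_queueable(q, true);
}

Under the interface removed here, blk_queue_write_cache(q, true, true) corresponds to the old blk_queue_flush(q, REQ_FLUSH | REQ_FUA): the wc flag takes over the role of REQ_FLUSH and the fua flag that of REQ_FUA, with the state now kept in queue flags rather than in q->flush_flags.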