Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/backing-dev-defs.h    2
-rw-r--r--  include/linux/blk_types.h          16
-rw-r--r--  include/linux/blkdev.h             19
-rw-r--r--  include/linux/fs.h                  3
-rw-r--r--  include/linux/pwm.h                 5
-rw-r--r--  include/linux/wbt.h               141
-rw-r--r--  include/linux/writeback.h          10
7 files changed, 6 insertions, 190 deletions
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index dc5f76d7f..c357f27d5 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,8 +116,6 @@ struct bdi_writeback {
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
- unsigned long dirty_sleep; /* last wait */
-
struct list_head bdi_node; /* anchored at bdi->wb_list */
#ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 95fbfa1fe..436f43f87 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -155,7 +155,6 @@ enum rq_flag_bits {
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
- __REQ_BG, /* background activity */
/* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
@@ -199,7 +198,7 @@ enum rq_flag_bits {
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
- REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_BG)
+ REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
/* This mask is used for both bio and request merge checking */
@@ -224,7 +223,6 @@ enum rq_flag_bits {
#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
-#define REQ_BG (1ULL << __REQ_BG)
#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
#define REQ_PM (1ULL << __REQ_PM)
@@ -266,16 +264,4 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
-#define BLK_RQ_STAT_BATCH 64
-
-struct blk_rq_stat {
- s64 mean;
- u64 min;
- u64 max;
- s32 nr_samples;
- s32 nr_batch;
- u64 batch;
- s64 time;
-};
-
#endif /* __LINUX_BLK_TYPES_H */
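Note on the removed blk_rq_stat: it is a batched min/mean/max accumulator, with up to BLK_RQ_STAT_BATCH samples summed into batch before being folded into mean. A rough sketch of how such an accumulator is typically driven, assuming that fold behaviour; this is not a copy of the block-layer stats code, and the function names are made up:

/*
 * Assumed fold logic: merge the pending batch into the running mean as
 * a weighted average, then reset the batch counters.
 */
static void example_stat_flush_batch(struct blk_rq_stat *stat)
{
	if (!stat->nr_batch)
		return;

	if (!stat->nr_samples)
		stat->mean = div64_s64(stat->batch, stat->nr_batch);
	else
		stat->mean = div64_s64(stat->mean * stat->nr_samples + stat->batch,
				       stat->nr_samples + stat->nr_batch);

	stat->nr_samples += stat->nr_batch;
	stat->nr_batch = 0;
	stat->batch = 0;
}

static void example_stat_add(struct blk_rq_stat *stat, u64 value)
{
	if (!(stat->nr_samples + stat->nr_batch) || value < stat->min)
		stat->min = value;
	if (value > stat->max)
		stat->max = value;

	stat->batch += value;
	if (++stat->nr_batch == BLK_RQ_STAT_BATCH)
		example_stat_flush_batch(stat);
}

Batching keeps the per-sample hot path down to an add and two compares; the 64-bit division only runs once every BLK_RQ_STAT_BATCH samples.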
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2624a0220..9de483535 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,7 +24,6 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
-#include <linux/wbt.h>
struct module;
struct scsi_ioctl_command;
@@ -38,7 +37,6 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
-struct rq_wb;
#define BLKDEV_MIN_RQ 4
#ifdef CONFIG_PCK_INTERACTIVE
@@ -157,7 +155,6 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
- struct wb_issue_stat wb_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -309,8 +306,6 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- struct rq_wb *rq_wb;
-
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
@@ -336,8 +331,6 @@ struct request_queue {
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
- unsigned int queue_depth;
-
/* hw dispatch queues */
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
@@ -423,9 +416,6 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
-
- struct blk_rq_stat rq_stats[2];
-
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
@@ -697,14 +687,6 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
return false;
}
-static inline unsigned int blk_queue_depth(struct request_queue *q)
-{
- if (q->queue_depth)
- return q->queue_depth;
-
- return q->nr_requests;
-}
-
/*
* q->prep_rq_fn return values
*/
@@ -1021,7 +1003,6 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
-extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
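The queue_depth field, the blk_queue_depth() helper, and the blk_set_queue_depth() setter removed above were the channel through which a driver advertised its real device depth to the writeback throttle. A minimal sketch of how the pair fits together; the driver-side function name is hypothetical:

/* Hypothetical driver-side call: publish the hardware queue depth. */
static void example_driver_publish_depth(struct request_queue *q,
					 unsigned int hw_depth)
{
	blk_set_queue_depth(q, hw_depth);
}

/*
 * Consumer side, mirroring the removed helper: prefer the advertised
 * depth, otherwise fall back to the number of allocated requests.
 */
static unsigned int example_effective_depth(struct request_queue *q)
{
	return q->queue_depth ? q->queue_depth : q->nr_requests;
}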
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6f180afdd..1b5999474 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -189,8 +189,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
* by a cache flush and data is guaranteed to be on
* non-volatile media on completion.
- * WRITE_BG Background write. This is for background activity like
- * the periodic flush and background threshold writeback
*
*/
#define RW_MASK REQ_OP_WRITE
@@ -204,7 +202,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
-#define WRITE_BG (REQ_NOIDLE | REQ_BG)
/*
* Attribute flags. These should be or-ed together to figure out what
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index f1bbae014..2c6c5114c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -641,6 +641,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -649,6 +650,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
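The new pwmchip_sysfs_unexport_children() presumably exists so a chip's per-channel sysfs nodes can be torn down before the chip itself. A hedged sketch of the intended ordering; the teardown function name is made up and not the PWM core's actual removal path:

/* Hypothetical teardown path; the ordering is the point. */
static void example_pwmchip_teardown(struct pwm_chip *chip)
{
	/*
	 * Drop the per-channel pwmX sysfs nodes first, so user space can
	 * no longer reach a channel whose chip is about to disappear,
	 * then remove the chip's own sysfs presence.
	 */
	pwmchip_sysfs_unexport_children(chip);
	pwmchip_sysfs_unexport(chip);
}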
diff --git a/include/linux/wbt.h b/include/linux/wbt.h
deleted file mode 100644
index 68ba75e3a..000000000
--- a/include/linux/wbt.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef WB_THROTTLE_H
-#define WB_THROTTLE_H
-
-#include <linux/atomic.h>
-#include <linux/wait.h>
-#include <linux/timer.h>
-#include <linux/ktime.h>
-
-enum wbt_flags {
- WBT_TRACKED = 1, /* write, tracked for throttling */
- WBT_READ = 2, /* read */
- WBT_KSWAPD = 4, /* write, from kswapd */
-
- WBT_NR_BITS = 3, /* number of bits */
-};
-
-enum {
- /*
- * Set aside 3 bits for state, rest is a time stamp
- */
- ISSUE_STAT_SHIFT = 64 - WBT_NR_BITS,
- ISSUE_STAT_MASK = ~((1ULL << ISSUE_STAT_SHIFT) - 1),
- ISSUE_STAT_TIME_MASK = ~ISSUE_STAT_MASK,
-
- WBT_NUM_RWQ = 2,
-};
-
-struct wb_issue_stat {
- u64 time;
-};
-
-static inline void wbt_issue_stat_set_time(struct wb_issue_stat *stat)
-{
- stat->time = (stat->time & ISSUE_STAT_MASK) |
- (ktime_to_ns(ktime_get()) & ISSUE_STAT_TIME_MASK);
-}
-
-static inline u64 wbt_issue_stat_get_time(struct wb_issue_stat *stat)
-{
- return stat->time & ISSUE_STAT_TIME_MASK;
-}
-
-static inline void wbt_clear_state(struct wb_issue_stat *stat)
-{
- stat->time &= ISSUE_STAT_TIME_MASK;
-}
-
-static inline enum wbt_flags wbt_stat_to_mask(struct wb_issue_stat *stat)
-{
- return (stat->time & ISSUE_STAT_MASK) >> ISSUE_STAT_SHIFT;
-}
-
-static inline void wbt_track(struct wb_issue_stat *stat, enum wbt_flags wb_acct)
-{
- stat->time |= ((u64) wb_acct) << ISSUE_STAT_SHIFT;
-}
-
-static inline bool wbt_is_tracked(struct wb_issue_stat *stat)
-{
- return (stat->time >> ISSUE_STAT_SHIFT) & WBT_TRACKED;
-}
-
-static inline bool wbt_is_read(struct wb_issue_stat *stat)
-{
- return (stat->time >> ISSUE_STAT_SHIFT) & WBT_READ;
-}
-
-struct wb_stat_ops {
- void (*get)(void *, struct blk_rq_stat *);
- bool (*is_current)(struct blk_rq_stat *);
- void (*clear)(void *);
-};
-
-struct rq_wait {
- wait_queue_head_t wait;
- atomic_t inflight;
-};
-
-struct rq_wb {
- /*
- * Settings that govern how we throttle
- */
- unsigned int wb_background; /* background writeback */
- unsigned int wb_normal; /* normal writeback */
- unsigned int wb_max; /* max throughput writeback */
- int scale_step;
- bool scaled_max;
-
- /*
- * Number of consecutive periods where we don't have enough
- * information to make a firm scale up/down decision.
- */
- unsigned int unknown_cnt;
-
- u64 win_nsec; /* default window size */
- u64 cur_win_nsec; /* current window size */
-
- struct timer_list window_timer;
-
- s64 sync_issue;
- void *sync_cookie;
-
- unsigned int wc;
- unsigned int queue_depth;
-
- unsigned long last_issue; /* last non-throttled issue */
- unsigned long last_comp; /* last non-throttled comp */
- unsigned long min_lat_nsec;
- struct backing_dev_info *bdi;
- struct rq_wait rq_wait[WBT_NUM_RWQ];
-
- struct wb_stat_ops *stat_ops;
- void *ops_data;
-};
-
-static inline unsigned int wbt_inflight(struct rq_wb *rwb)
-{
- unsigned int i, ret = 0;
-
- for (i = 0; i < WBT_NUM_RWQ; i++)
- ret += atomic_read(&rwb->rq_wait[i].inflight);
-
- return ret;
-}
-
-struct backing_dev_info;
-
-void __wbt_done(struct rq_wb *, enum wbt_flags);
-void wbt_done(struct rq_wb *, struct wb_issue_stat *);
-enum wbt_flags wbt_wait(struct rq_wb *, unsigned int, spinlock_t *);
-struct rq_wb *wbt_init(struct backing_dev_info *, struct wb_stat_ops *, void *);
-void wbt_exit(struct rq_wb *);
-void wbt_update_limits(struct rq_wb *);
-void wbt_requeue(struct rq_wb *, struct wb_issue_stat *);
-void wbt_issue(struct rq_wb *, struct wb_issue_stat *);
-void wbt_disable(struct rq_wb *);
-
-void wbt_set_queue_depth(struct rq_wb *, unsigned int);
-void wbt_set_write_cache(struct rq_wb *, bool);
-
-#endif
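For reference, the deleted wb_issue_stat packs two things into a single u64: the issue timestamp in the low ISSUE_STAT_SHIFT (61) bits and the wbt_flags in the top WBT_NR_BITS (3) bits. A small usage sketch built only from the removed helpers; the caller names are made up:

static void example_issue(struct wb_issue_stat *stat, bool from_kswapd)
{
	/* Stamp the issue time into the low 61 bits of stat->time... */
	wbt_issue_stat_set_time(stat);
	/* ...and OR the throttling flags into the top 3 bits. */
	wbt_track(stat, WBT_TRACKED | (from_kswapd ? WBT_KSWAPD : 0));
}

static void example_complete(struct rq_wb *rwb, struct wb_issue_stat *stat)
{
	/* Both halves can be read back independently later. */
	u64 issue_ns = wbt_issue_stat_get_time(stat);

	if (wbt_is_tracked(stat))
		__wbt_done(rwb, wbt_stat_to_mask(stat));

	pr_debug("request issued at %llu ns\n", (unsigned long long)issue_ns);
}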
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index e53abf2bf..fc1e16c25 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -100,16 +100,6 @@ struct writeback_control {
#endif
};
-static inline int wbc_to_write_flags(struct writeback_control *wbc)
-{
- if (wbc->sync_mode == WB_SYNC_ALL)
- return WRITE_SYNC;
- else if (wbc->for_kupdate || wbc->for_background)
- return WRITE_BG;
-
- return 0;
-}
-
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
* and are measured against each other in. There always is one global
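The dropped wbc_to_write_flags() was the spot where filesystems turned a writeback_control into per-bio write flags (WB_SYNC_ALL to WRITE_SYNC, kupdate/background to WRITE_BG, else 0). A hedged sketch of such a caller; the submit path below is illustrative only:

static void example_submit_writeback_bio(struct bio *bio,
					 struct writeback_control *wbc)
{
	int op_flags = wbc_to_write_flags(wbc);

	/*
	 * Integrity writeback gets REQ_SYNC via WRITE_SYNC, periodic and
	 * background writeback get the (now removed) WRITE_BG hint, and
	 * everything else is submitted with no extra flags.
	 */
	bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
	submit_bio(bio);
}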