#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

enum {
	WBT_NUM_RWQ	= 2,
};
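/*
 * Two rq_wait queues: writes from kswapd are accounted and throttled
 * separately from other buffered writeback (see the WBT_KSWAPD flag
 * above and the rq_wait[] indexing in blk-wbt.c).
 */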
/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};
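/*
 * Recording *how* throttling was enabled lets wbt_disable_default()
 * (declared below) back off only when wbt was switched on by default
 * at init time, leaving a manual sysfs setup untouched.
 */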
static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->stat &= ~BLK_STAT_RES_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
}
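/*
 * The helpers above piggyback the wbt_flags onto the reserved result
 * bits of blk_issue_stat::stat (BLK_STAT_RES_MASK/BLK_STAT_RES_SHIFT
 * are defined in blk-stat.h), so tracking needs no extra per-request
 * storage. Illustrative round-trip, not actual kernel code:
 *
 *	wbt_track(stat, WBT_TRACKED | WBT_KSWAPD);
 *	wbt_stat_to_mask(stat);		-> WBT_TRACKED | WBT_KSWAPD
 *	wbt_is_tracked(stat);		-> true
 */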
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;
	bool scaled_max;

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	s64 sync_issue;
	void *sync_cookie;

	unsigned int wc;
	unsigned int queue_depth;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};
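/*
 * A rough sketch of how these fields drive scaling (the exact rules
 * live in blk-wbt.c): completion latencies are sampled over
 * cur_win_nsec through the blk_stat_callback, compared against the
 * min_lat_nsec target, and scale_step is stepped up or down, which
 * recomputes the wb_background/wb_normal/wb_max inflight limits.
 */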
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}
#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);
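/*
 * Rough call flow, as wired up in the block core: wbt_init() at queue
 * setup; wbt_wait() in the submission path, which may throttle the
 * writer and returns the wbt_flags to stash with wbt_track();
 * wbt_issue() when the request is started; wbt_done() or wbt_requeue()
 * on completion or requeue; wbt_exit() at queue teardown.
 */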
#else

static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif