]>
Commit | Line | Data |
---|---|---|
e34cbd30 JA |
1 | #ifndef WB_THROTTLE_H |
2 | #define WB_THROTTLE_H | |
3 | ||
4 | #include <linux/kernel.h> | |
5 | #include <linux/atomic.h> | |
6 | #include <linux/wait.h> | |
7 | #include <linux/timer.h> | |
8 | #include <linux/ktime.h> | |
9 | ||
10 | #include "blk-stat.h" | |
11 | ||
/*
 * Per-request accounting flags. These are stashed in the upper bits of
 * blk_issue_stat->time via wbt_track()/wbt_stat_to_mask() below, so they
 * must fit in WBT_NR_BITS.
 */
enum wbt_flags {
	WBT_TRACKED = 1,	/* write, tracked for throttling */
	WBT_READ = 2,		/* read */
	WBT_KSWAPD = 4,		/* write, from kswapd */

	WBT_NR_BITS = 3,	/* number of bits */
};
19 | ||
/*
 * Number of rq_wait queues (see struct rq_wb below).
 * NOTE(review): presumably one for normal writes and one for kswapd
 * writes (cf. WBT_KSWAPD) — confirm against blk-wbt.c.
 */
enum {
	WBT_NUM_RWQ = 2,
};
23 | ||
d62118b6 JA |
/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT = 1,	/* enabled automatically at init time */
	WBT_STATE_ON_MANUAL = 2,	/* enabled explicitly through sysfs */
};
32 | ||
e34cbd30 JA |
33 | static inline void wbt_clear_state(struct blk_issue_stat *stat) |
34 | { | |
35 | stat->time &= BLK_STAT_TIME_MASK; | |
36 | } | |
37 | ||
38 | static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat) | |
39 | { | |
40 | return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT; | |
41 | } | |
42 | ||
43 | static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct) | |
44 | { | |
45 | stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT; | |
46 | } | |
47 | ||
48 | static inline bool wbt_is_tracked(struct blk_issue_stat *stat) | |
49 | { | |
50 | return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED; | |
51 | } | |
52 | ||
53 | static inline bool wbt_is_read(struct blk_issue_stat *stat) | |
54 | { | |
55 | return (stat->time >> BLK_STAT_SHIFT) & WBT_READ; | |
56 | } | |
57 | ||
e34cbd30 JA |
/*
 * A wait queue paired with the count of requests currently in flight
 * against it (see wbt_inflight() below for how it is read).
 */
struct rq_wait {
	wait_queue_head_t wait;		/* throttled tasks sleep here */
	atomic_t inflight;		/* requests currently outstanding */
};
62 | ||
/*
 * Per-request_queue writeback throttling state.
 */
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;				/* current scaling step; semantics in blk-wbt.c — NOTE(review): confirm sign convention */
	bool scaled_max;			/* NOTE(review): presumably set when scaled all the way up — confirm */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;		/* stats callback driving scaling (see blk-stat.h) */

	s64 sync_issue;				/* NOTE(review): issue time of tracked sync request — confirm */
	void *sync_cookie;			/* opaque cookie paired with sync_issue */

	unsigned int wc;			/* write cache state, set via wbt_set_write_cache() */
	unsigned int queue_depth;		/* device queue depth, set via wbt_set_queue_depth() */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* target minimum latency */
	struct request_queue *queue;		/* owning request queue */
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* per-class inflight accounting */
};
98 | ||
99 | static inline unsigned int wbt_inflight(struct rq_wb *rwb) | |
100 | { | |
101 | unsigned int i, ret = 0; | |
102 | ||
103 | for (i = 0; i < WBT_NUM_RWQ; i++) | |
104 | ret += atomic_read(&rwb->rq_wait[i].inflight); | |
105 | ||
106 | return ret; | |
107 | } | |
108 | ||
e34cbd30 JA |
#ifdef CONFIG_BLK_WBT

/* Completion/issue/requeue hooks, called from the block layer. */
void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
/* Throttle an incoming bio; returns the wbt_flags to track for it. */
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
/* Attach/detach throttling state on a request queue. */
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
/* Turn off wbt if it was enabled by default (WBT_STATE_ON_DEFAULT). */
void wbt_disable_default(struct request_queue *);

/* Propagate device properties that influence throttling decisions. */
void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

e34cbd30 JA |
126 | #else |
127 | ||
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
/* CONFIG_BLK_WBT=n stub: never throttles, tracks no flags (returns 0). */
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
/*
 * CONFIG_BLK_WBT=n stub: fails with -EINVAL so callers can tell that
 * writeback throttling is unavailable.
 */
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
/* CONFIG_BLK_WBT=n stub: nothing was initialized, nothing to tear down. */
static inline void wbt_exit(struct request_queue *q)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to disable. */
static inline void wbt_disable_default(struct request_queue *q)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
/* CONFIG_BLK_WBT=n stub: throttling is compiled out, nothing to do. */
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
80e091d1 JA |
/* CONFIG_BLK_WBT=n stub: no meaningful default latency, report 0. */
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}
e34cbd30 JA |
168 | |
169 | #endif /* CONFIG_BLK_WBT */ | |
170 | ||
171 | #endif |