/* SPDX-License-Identifier: GPL-2.0 */
2 | #ifndef WB_THROTTLE_H |
3 | #define WB_THROTTLE_H | |
4 | ||
5 | #include <linux/kernel.h> | |
6 | #include <linux/atomic.h> | |
7 | #include <linux/wait.h> | |
8 | #include <linux/timer.h> | |
9 | #include <linux/ktime.h> | |
10 | ||
11 | #include "blk-stat.h" | |
12 | ||
/*
 * Per-request wbt accounting flags, stashed in the reserved bits of
 * struct blk_issue_stat (see wbt_track()/wbt_stat_to_mask() below).
 */
enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of flag bits in use */
};
20 | ||
enum {
	/* number of rq_wait inflight-accounting queues in struct rq_wb */
	WBT_NUM_RWQ	= 2,
};
24 | ||
/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,	/* enabled at queue init time */
	WBT_STATE_ON_MANUAL	= 2,	/* enabled manually via sysfs */
};
33 | ||
e34cbd30 JA |
34 | static inline void wbt_clear_state(struct blk_issue_stat *stat) |
35 | { | |
88eeca49 | 36 | stat->stat &= ~BLK_STAT_RES_MASK; |
e34cbd30 JA |
37 | } |
38 | ||
39 | static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat) | |
40 | { | |
88eeca49 | 41 | return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT; |
e34cbd30 JA |
42 | } |
43 | ||
44 | static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct) | |
45 | { | |
88eeca49 | 46 | stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT; |
e34cbd30 JA |
47 | } |
48 | ||
49 | static inline bool wbt_is_tracked(struct blk_issue_stat *stat) | |
50 | { | |
88eeca49 | 51 | return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED; |
e34cbd30 JA |
52 | } |
53 | ||
54 | static inline bool wbt_is_read(struct blk_issue_stat *stat) | |
55 | { | |
88eeca49 | 56 | return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ; |
e34cbd30 JA |
57 | } |
58 | ||
/*
 * One inflight-request accounting domain: a counter plus a wait queue.
 * NOTE(review): presumably throttled submitters sleep on @wait while
 * @inflight is over the current limit — confirm against blk-wbt.c.
 */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;	/* requests currently in flight in this domain */
};
63 | ||
/*
 * Per-queue writeback throttling state.
 */
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;				/* current scaling step; sign convention defined in blk-wbt.c — TODO confirm */
	bool scaled_max;			/* NOTE(review): presumably set when scaled fully up — verify in blk-wbt.c */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;		/* stats callback (see blk-stat.h) */

	s64 sync_issue;				/* NOTE(review): issue-time cookie pair below — confirm semantics in blk-wbt.c */
	void *sync_cookie;

	unsigned int wc;			/* set via wbt_set_write_cache() */
	unsigned int queue_depth;		/* set via wbt_set_queue_depth() */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* latency target; see wbt_default_latency_nsec() */
	struct request_queue *queue;		/* owning request queue */
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* inflight accounting domains */
};
99 | ||
100 | static inline unsigned int wbt_inflight(struct rq_wb *rwb) | |
101 | { | |
102 | unsigned int i, ret = 0; | |
103 | ||
104 | for (i = 0; i < WBT_NUM_RWQ; i++) | |
105 | ret += atomic_read(&rwb->rq_wait[i].inflight); | |
106 | ||
107 | return ret; | |
108 | } | |
109 | ||
e34cbd30 JA |
110 | #ifdef CONFIG_BLK_WBT |
111 | ||
112 | void __wbt_done(struct rq_wb *, enum wbt_flags); | |
113 | void wbt_done(struct rq_wb *, struct blk_issue_stat *); | |
114 | enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *); | |
8054b89f | 115 | int wbt_init(struct request_queue *); |
e34cbd30 JA |
116 | void wbt_exit(struct request_queue *); |
117 | void wbt_update_limits(struct rq_wb *); | |
118 | void wbt_requeue(struct rq_wb *, struct blk_issue_stat *); | |
119 | void wbt_issue(struct rq_wb *, struct blk_issue_stat *); | |
fa224eed | 120 | void wbt_disable_default(struct request_queue *); |
8330cdb0 | 121 | void wbt_enable_default(struct request_queue *); |
e34cbd30 JA |
122 | |
123 | void wbt_set_queue_depth(struct rq_wb *, unsigned int); | |
124 | void wbt_set_write_cache(struct rq_wb *, bool); | |
125 | ||
80e091d1 JA |
126 | u64 wbt_default_latency_nsec(struct request_queue *); |
127 | ||
e34cbd30 JA |
128 | #else |
129 | ||
130 | static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags) | |
131 | { | |
132 | } | |
133 | static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat) | |
134 | { | |
135 | } | |
136 | static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, | |
137 | spinlock_t *lock) | |
138 | { | |
139 | return 0; | |
140 | } | |
8054b89f | 141 | static inline int wbt_init(struct request_queue *q) |
e34cbd30 JA |
142 | { |
143 | return -EINVAL; | |
144 | } | |
145 | static inline void wbt_exit(struct request_queue *q) | |
146 | { | |
147 | } | |
148 | static inline void wbt_update_limits(struct rq_wb *rwb) | |
149 | { | |
150 | } | |
151 | static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat) | |
152 | { | |
153 | } | |
154 | static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat) | |
155 | { | |
156 | } | |
fa224eed | 157 | static inline void wbt_disable_default(struct request_queue *q) |
e34cbd30 JA |
158 | { |
159 | } | |
8330cdb0 JK |
160 | static inline void wbt_enable_default(struct request_queue *q) |
161 | { | |
162 | } | |
e34cbd30 JA |
163 | static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth) |
164 | { | |
165 | } | |
166 | static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc) | |
167 | { | |
168 | } | |
80e091d1 JA |
169 | static inline u64 wbt_default_latency_nsec(struct request_queue *q) |
170 | { | |
171 | return 0; | |
172 | } | |
e34cbd30 JA |
173 | |
174 | #endif /* CONFIG_BLK_WBT */ | |
175 | ||
176 | #endif |