// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
#include "internal.h"

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

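/*
 * Added commentary (not part of the original file):
 *
 * read_ahead_kb_store() above converts kilobytes to pages with
 * "read_ahead_kb >> (PAGE_SHIFT - 10)": a page is 2^PAGE_SHIFT bytes,
 * i.e. 2^(PAGE_SHIFT - 10) KiB, so with 4 KiB pages writing 128 to the
 * read_ahead_kb attribute yields 128 >> 2 == 32 pages of readahead.
 *
 * For the read side, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) expands to
 * roughly:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *buf)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return sysfs_emit(buf, "%lld\n", (long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * which pairs with read_ahead_kb_store() to form the read-write
 * "read_ahead_kb" sysfs attribute.
 */
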
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE)

static ssize_t min_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio_fine, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE)

static ssize_t max_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio_fine, bdi->max_ratio)

static ssize_t min_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi));
}

static ssize_t min_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(min_bytes);

static ssize_t max_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi));
}

static ssize_t max_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(max_bytes);

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static ssize_t strict_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int strict_limit;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &strict_limit);
	if (ret < 0)
		return ret;

	ret = bdi_set_strict_limit(bdi, strict_limit);
	if (!ret)
		ret = count;

	return ret;
}

static ssize_t strict_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			!!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
}
static DEVICE_ATTR_RW(strict_limit);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_min_ratio_fine.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_max_ratio_fine.attr,
	&dev_attr_min_bytes.attr,
	&dev_attr_max_bytes.attr,
	&dev_attr_stable_pages_required.attr,
	&dev_attr_strict_limit.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static const struct class bdi_class = {
	.name		= "bdi",
	.dev_groups	= bdi_dev_groups,
};

static __init int bdi_class_init(void)
{
	int ret;

	ret = class_register(&bdi_class);
	if (ret)
		return ret;

	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_irq(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_irq(&wb->work_lock);
}

static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

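/*
 * Added note (not part of the original file): INIT_BW expresses 100 MB/s in
 * pages per second.  100 MB/s is 100 << 20 bytes/s, and a page is
 * 1 << PAGE_SHIFT bytes, so the rate is 100 << (20 - PAGE_SHIFT) pages/s,
 * e.g. 25600 pages/s with 4 KiB pages.
 */
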
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		return err;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_irq(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_irq(&wb->work_lock);
		return;
	}
	spin_unlock_irq(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
	struct bdi_writeback *wb = container_of(rcu_head,
			struct bdi_writeback, rcu);

	percpu_ref_exit(&wb->refcnt);
	kfree(wb);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	wb_exit(wb);
	bdi_put(bdi);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	call_rcu(&wb->rcu, cgwb_free_rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);
	bdi_get(bdi);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg_css);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	bdi_put(bdi);
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

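/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): a caller holding a reference on a memcg css can look up, or
 * lazily create, the matching writeback structure and drop it when done:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_NOFS);
 *	if (wb) {
 *		... queue or account writeback against wb ...
 *		wb_put(wb);
 *	}
 *
 * A NULL return means the wb did not exist and could not be created
 * (e.g. allocation failure or the bdi/cgroup going away).
 */
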
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback.  Processed wbs are placed at the end
 * of the list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal.  Instead,
		 * it can be postponed until the next time, when all io is
		 * likely to have completed.  If some inodes get re-dirtied
		 * in the meantime, they should eventually be switched to
		 * a new cgwb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
	struct bdi_writeback *wb, *next;
	struct list_head *list = blkcg_get_cgwb_list(css);

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, list, blkcg_node)
		cgwb_kill(wb);
	list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100 * BDI_RATIO_SCALE;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

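/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): a successful lookup returns a referenced bdi that the caller must
 * release with bdi_put():
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		... use bdi ...
 *		bdi_put(bdi);
 *	}
 */
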
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

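/*
 * Illustrative lifecycle sketch (added commentary, not part of the original
 * file; "mydev%d" and "minor" are hypothetical): a driver that wants its own
 * bdi typically pairs these calls as follows:
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *
 *	err = bdi_register(bdi, "mydev%d", minor);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *
 *	...
 *
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */
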
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	del_timer_sync(&bdi->laptop_mode_wb_timer);

	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
		container_of(ref, struct backing_dev_info, refcnt);

	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL(inode_to_bdi);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);