/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    Andrew Morton
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}
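
/*
 * Worked example (illustrative, assuming 4KB pages): with ratelimit_pages
 * at its 1024-page cap (4MB, see writeback_set_ratelimit() below),
 * sync_writeback_pages() returns 1024 + 512 = 1536 pages, i.e. the
 * "six megabyte chunks" referred to above writeback_set_ratelimit().
 */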

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 5;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 10;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
        unsigned long dirty_total;

        dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
        return 2 + ilog2(dirty_total - 1);
}
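
/*
 * Worked example (illustrative, assumed numbers): with vm_dirty_ratio = 10
 * and 2,621,440 dirtyable pages (10GB of 4KB pages), dirty_total is
 * 262,144 pages; ilog2(262,143) = 17, so the shift is 19 and the period
 * spans 2^19 completion events, giving period/2 = 262,144 =
 * roundup_pow_of_two(dirty limit), as the coupling above intends.
 */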

/*
 * update the period when the dirty ratio changes.
 */
int dirty_ratio_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int old_ratio = vm_dirty_ratio;
        int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                int shift = calc_period_shift();
                prop_change_shift(&vm_completions, shift);
                prop_change_shift(&vm_dirties, shift);
        }
        return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
        __prop_inc_percpu_max(&vm_completions, &bdi->completions,
                              bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
        unsigned long flags;

        local_irq_save(flags);
        __bdi_writeout_inc(bdi);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

static inline void task_dirty_inc(struct task_struct *tsk)
{
        prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
                long *numerator, long *denominator)
{
        if (bdi_cap_writeback_dirty(bdi)) {
                prop_fraction_percpu(&vm_completions, &bdi->completions,
                                numerator, denominator);
        } else {
                *numerator = 0;
                *denominator = 1;
        }
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
        long avail_dirty;

        avail_dirty = dirty -
                (global_page_state(NR_FILE_DIRTY) +
                 global_page_state(NR_WRITEBACK) +
                 global_page_state(NR_UNSTABLE_NFS) +
                 global_page_state(NR_WRITEBACK_TEMP));

        if (avail_dirty < 0)
                avail_dirty = 0;

        avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
                bdi_stat(bdi, BDI_WRITEBACK);

        *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
                long *numerator, long *denominator)
{
        prop_fraction_single(&vm_dirties, &tsk->dirties,
                                numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
        long numerator, denominator;
        long dirty = *pdirty;
        u64 inv = dirty >> 3;

        task_dirties_fraction(tsk, &numerator, &denominator);
        inv *= numerator;
        do_div(inv, denominator);

        dirty -= inv;
        if (dirty < *pdirty/2)
                dirty = *pdirty/2;

        *pdirty = dirty;
}
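
/*
 * Worked example (illustrative, assumed numbers): with a limit of
 * *pdirty = 80,000 pages, a task responsible for half of the recently
 * dirtied pages (p_t = 1/2) is left with 80,000 - (80,000/8) * 1/2 =
 * 75,000 pages, and a task responsible for all of them (p_t = 1) with
 * 70,000; the result is never allowed below *pdirty/2.
 */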

/*
 *
 */
static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&bdi_lock, flags);
        if (min_ratio > bdi->max_ratio) {
                ret = -EINVAL;
        } else {
                min_ratio -= bdi->min_ratio;
                if (bdi_min_ratio + min_ratio < 100) {
                        bdi_min_ratio += min_ratio;
                        bdi->min_ratio += min_ratio;
                } else {
                        ret = -EINVAL;
                }
        }
        spin_unlock_irqrestore(&bdi_lock, flags);

        return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
        unsigned long flags;
        int ret = 0;

        if (max_ratio > 100)
                return -EINVAL;

        spin_lock_irqsave(&bdi_lock, flags);
        if (bdi->min_ratio > max_ratio) {
                ret = -EINVAL;
        } else {
                bdi->max_ratio = max_ratio;
                bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
        }
        spin_unlock_irqrestore(&bdi_lock, flags);

        return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
        int node;
        unsigned long x = 0;

        for_each_node_state(node, N_HIGH_MEMORY) {
                struct zone *z =
                        &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

                x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
        }
        /*
         * Make sure that the number of highmem pages is never larger
         * than the number of the total dirtyable memory. This can only
         * occur in very strange VM situations but we want to make sure
         * that this does not occur.
         */
        return min(x, total);
#else
        return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
        unsigned long x;

        x = global_page_state(NR_FREE_PAGES) + global_lru_pages();

        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);

        return x + 1;   /* Ensure that we never return 0 */
}

void
get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
                 struct backing_dev_info *bdi)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        long background;
        long dirty;
        unsigned long available_memory = determine_dirtyable_memory();
        struct task_struct *tsk;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;

        if (bdi) {
                u64 bdi_dirty;
                long numerator, denominator;

                /*
                 * Calculate this BDI's share of the dirty ratio.
                 */
                bdi_writeout_fraction(bdi, &numerator, &denominator);

                bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
                bdi_dirty *= numerator;
                do_div(bdi_dirty, denominator);
                bdi_dirty += (dirty * bdi->min_ratio) / 100;
                if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
                        bdi_dirty = dirty * bdi->max_ratio / 100;

                *pbdi_dirty = bdi_dirty;
                clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
                task_dirty_limit(current, pbdi_dirty);
        }
}
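
/*
 * Worked example (illustrative, assumed numbers): with 1,000,000 dirtyable
 * pages, vm_dirty_ratio = 10 and dirty_background_ratio = 5, the global
 * limits are dirty = 100,000 and background = 50,000 pages (125,000 and
 * 62,500 for a PF_LESS_THROTTLE or real-time task).  A bdi that has earned
 * a 1/4 share of recent writeout completions, assuming min_ratio = 0 and
 * max_ratio = 100, then starts from bdi_dirty = 25,000 before the clipping
 * and per-task correction above are applied.
 */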

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        long nr_reclaimable, bdi_nr_reclaimable;
        long nr_writeback, bdi_nr_writeback;
        long background_thresh;
        long dirty_thresh;
        long bdi_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                        .range_cyclic   = 1,
                };

                get_dirty_limits(&background_thresh, &dirty_thresh,
                                &bdi_thresh, bdi);

                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                nr_writeback = global_page_state(NR_WRITEBACK);

                bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                        break;

                /*
                 * Throttle it only when the background writeback cannot
                 * catch-up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
                if (nr_reclaimable + nr_writeback <
                                (background_thresh + dirty_thresh) / 2)
                        break;

                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (bdi_nr_reclaimable) {
                        writeback_inodes(&wbc);
                        pages_written += write_chunk - wbc.nr_to_write;
                        get_dirty_limits(&background_thresh, &dirty_thresh,
                                        &bdi_thresh, bdi);
                }

                /*
                 * In order to avoid the stacked BDI deadlock we need
                 * to ensure we accurately count the 'dirty' pages when
                 * the threshold is low.
                 *
                 * Otherwise it would be possible to get thresh+n pages
                 * reported dirty, even though there are thresh-m pages
                 * actually dirty; with m+n sitting in the percpu
                 * deltas.
                 */
                if (bdi_thresh < 2*bdi_stat_error(bdi)) {
                        bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
                        bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
                } else if (bdi_nr_reclaimable) {
                        bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                        bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
                }

                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                        break;
                if (pages_written >= write_chunk)
                        break;          /* We've done our duty */

                congestion_wait(WRITE, HZ/10);
        }

        if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
                        bdi->dirty_exceeded)
                bdi->dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
                                + global_page_state(NR_UNSTABLE_NFS)
                                > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
        if (set_page_dirty(page) || page_mkwrite) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (mapping->backing_dev_info->dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
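
/*
 * Note: most callers dirty one page at a time and go through the
 * balance_dirty_pages_ratelimited() wrapper (as set_page_dirty_balance()
 * above does); in kernels of this vintage that wrapper simply calls
 * balance_dirty_pages_ratelimited_nr(mapping, 1) from
 * include/linux/writeback.h.
 */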

void throttle_vm_writeout(gfp_t gfp_mask)
{
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                congestion_wait(WRITE, HZ/10);

                /*
                 * The caller might hold locks which can prevent IO completion
                 * or progress in the filesystem.  So we cannot just sit here
                 * waiting for IO to complete.
                 */
                if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
                        break;
        }
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .range_cyclic   = 1,
        };

        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        if (wbc.encountered_congestion || wbc.more_io)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
                .range_cyclic   = 1,
        };

        sync_supers();

        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion || wbc.more_io)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        else
                del_timer(&wb_timer);
        return 0;
}

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
        ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
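
/*
 * Worked example (illustrative, assumed numbers): with vm_total_pages =
 * 262,144 (1GB of 4KB pages) and 4 online CPUs, the initial value is
 * 262,144 / (4 * 32) = 2,048 pages (8MB), which the 4MB cap then lowers
 * to (4096 * 1024) / 4096 = 1,024 pages; a very small machine would
 * instead be lifted to the 16-page floor.
 */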

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        writeback_set_ratelimit();
        return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
        int shift;

        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);

        shift = calc_period_shift();
        prop_descriptor_init(&vm_completions, shift);
        prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        int scanned = 0;
        int range_whole = 0;
        long nr_to_write = wbc->nr_to_write;

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                return 0;
        }

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }
retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;

                scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or even
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */
                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                continue;
                        }

                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);

                        if (PageWriteback(page) ||
                            !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                continue;
                        }

                        ret = (*writepage)(page, wbc, data);

                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
                                unlock_page(page);
                                ret = 0;
                        }
                        if (ret || (--nr_to_write <= 0))
                                done = 1;
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }
        if (!wbc->no_nrwrite_index_update) {
                if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
                        mapping->writeback_index = index;
                wbc->nr_to_write = nr_to_write;
        }

        return ret;
}
EXPORT_SYMBOL(write_cache_pages);

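/*
 * Usage note: generic_writepages() below is the in-tree example of how this
 * is meant to be called - it passes __writepage() as the callback and the
 * mapping itself as @data, so write errors can be propagated via
 * mapping_set_error().
 */
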
/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
                       void *data)
{
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);
        mapping_set_error(mapping, ret);
        return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;

        return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (!mapping)
                        return 1;

                spin_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
                        WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                        if (mapping_cap_account_dirty(mapping)) {
                                __inc_zone_page_state(page, NR_FILE_DIRTY);
                                __inc_bdi_stat(mapping->backing_dev_info,
                                                BDI_RECLAIMABLE);
                                task_io_account_write(PAGE_CACHE_SIZE);
                        }
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
                spin_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
static int __set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
                if (!spd)
                        spd = __set_page_dirty_buffers;
#endif
                return (*spd)(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}

int set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty(page);
        if (ret)
                task_dirty_inc(current);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page_nosync(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        BUG_ON(!PageLocked(page));

        ClearPageReclaim(page);
        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.
                 *
                 * We use this sequence to make sure that
                 *  (a) we account for dirty stats properly
                 *  (b) we tell the low-level filesystem to
                 *      mark the whole page dirty if it was
                 *      dirty in a pagetable. Only to then
                 *  (c) clean the page again and return 1 to
                 *      cause the writeback.
                 *
                 * This way we avoid all nasty races with the
                 * dirty bit in multiple places and clearing
                 * them concurrently from different threads.
                 *
                 * Note! Normally the "set_page_dirty(page)"
                 * has no effect on the actual dirty bit - since
                 * that will already usually be set. But we
                 * need the side effects, and it can help us
                 * avoid races.
                 *
                 * We basically use the page "master dirty bit"
                 * as a serialization point for all the different
                 * threads doing their things.
                 */
                if (page_mkclean(page))
                        set_page_dirty(page);
                /*
                 * We carefully synchronise fault handlers against
                 * installing a dirty pte and marking the page dirty
                 * at this point. We do this by having them hold the
                 * page lock at some point after installing their
                 * pte, but before marking the page dirty.
                 * Pages are always locked coming in here, so we get
                 * the desired exclusion. See mm/memory.c:do_wp_page()
                 * for more comments.
                 */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
                        dec_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        if (ret)
                dec_zone_page_state(page, NR_WRITEBACK);
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret) {
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi))
                                __inc_bdi_stat(bdi, BDI_WRITEBACK);
                }
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        if (!ret)
                inc_zone_page_state(page, NR_WRITEBACK);
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        int ret;
        rcu_read_lock();
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);