/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
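
/*
 * For illustration (not part of this file): a block-backed filesystem can
 * either leave ->invalidatepage NULL and rely on the block_invalidatepage()
 * fallback above, or wire the fallback up explicitly in its
 * address_space_operations.  "foofs" is a hypothetical filesystem:
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.readpage	= foofs_readpage,
 *		.writepage	= foofs_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */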

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
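
/*
 * Usage sketch (hypothetical caller): once every buffer on a page has been
 * cleaned outside the VM, the page's dirty bit can be cancelled and the
 * cancelled bytes credited back to the task's IO accounting:
 *
 *	if (page_has_buffers(page) && all_buffers_clean(page))
 *		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * all_buffers_clean() is made up for illustration; fs/buffer.c open-codes
 * the equivalent walk over the page's buffer_heads.
 */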

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to avoid as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second
 * pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
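
/*
 * Worked example (illustrative): to drop the cached pages backing bytes
 * 16384..32767 of a file with a 4096-byte PAGE_CACHE_SIZE, lend must sit on
 * the last byte of a page, which is what the BUG_ON above enforces:
 *
 *	truncate_inode_pages_range(inode->i_mapping, 16384, 32767);
 *
 * Here 32767 & (PAGE_CACHE_SIZE - 1) == PAGE_CACHE_SIZE - 1, so the check
 * passes; start is 4, end is 7, and pages 4..7 are truncated with no
 * partial page to zero.
 */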

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
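
/*
 * Typical caller (sketch only, details vary by filesystem): a truncate
 * path shrinks i_size under i_mutex and then throws away the now-stale
 * pagecache tail:
 *
 *	mutex_lock(&inode->i_mutex);
 *	i_size_write(inode, newsize);
 *	truncate_inode_pages(inode->i_mapping, newsize);
 *	mutex_unlock(&inode->i_mutex);
 *
 * Everything from newsize onwards is removed; a partial final page is
 * zeroed from newsize to its end.
 */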

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
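
/*
 * Example (illustrative): an opportunistic cache dropper - such as the
 * "echo 1 > /proc/sys/vm/drop_caches" path - can try to shed every clean,
 * unmapped page of a mapping with:
 *
 *	unsigned long nr = invalidate_mapping_pages(mapping, 0, -1);
 *
 * nr counts the pages actually freed; dirty, locked, writeback and mapped
 * pages are simply skipped, so this never loses data.
 */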

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					  0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
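
/*
 * Example (illustrative): a direct-IO write must kick out any cached pages
 * covering the byte range it is about to overwrite on disk.  Converting an
 * inclusive byte range to inclusive page offsets looks like this, where
 * offset and count are hypothetical byte-range variables:
 *
 *	pgoff_t first = offset >> PAGE_CACHE_SHIFT;
 *	pgoff_t last = (offset + count - 1) >> PAGE_CACHE_SHIFT;
 *	int err = invalidate_inode_pages2_range(mapping, first, last);
 *
 * A -EBUSY return means some page could not be invalidated (e.g. it was
 * redirtied under us) and the caller decides whether that is fatal.
 */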

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
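
/*
 * Usage sketch: a network filesystem that detects that its cached data has
 * gone stale on the server can flush the whole mapping, unmapping any
 * mapped pages first (server_data_changed is a made-up condition):
 *
 *	if (server_data_changed)
 *		invalidate_inode_pages2(inode->i_mapping);
 */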