/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	[email protected]
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
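/*
 * Illustrative sketch (not from this file): a filesystem opts in to the
 * dispatch above by setting ->invalidatepage in its
 * address_space_operations; the "foo" names here are hypothetical.
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.invalidatepage	= foo_invalidatepage,
 *	};
 *
 * foo_invalidatepage() must then provide the guarantees documented above;
 * block-backed filesystems that need nothing special simply fall back to
 * block_invalidatepage().
 */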

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
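/*
 * Example of the intended use, as seen in truncate_complete_page() below:
 * a page about to vanish from the pagecache has a whole page's worth of
 * dirty accounting cancelled before removal:
 *
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 */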

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	remove_from_page_cache(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page if lstart
 * is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
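/*
 * Illustrative sketch (hypothetical caller, not from this file): to drop
 * the pagecache backing whole pages [first, last] of an inode, a caller
 * would convert page indices to byte offsets, remembering that 'lend'
 * must be the *last* byte of a page or the BUG_ON() above fires:
 *
 *	truncate_inode_pages_range(inode->i_mapping,
 *			(loff_t)first << PAGE_CACHE_SHIFT,
 *			(((loff_t)last + 1) << PAGE_CACHE_SHIFT) - 1);
 */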

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
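/*
 * Typical use (a sketch of the common pattern, not code from this file):
 * inode teardown discards the entire pagecache of a dying inode with
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 *
 * while a file truncate passes the new size, under i_mutex, so that
 * everything from that offset onwards is removed.
 */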

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
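/*
 * Example of the whole-file form (a sketch of how callers such as the
 * drop_caches code use this; the surrounding context is hypothetical):
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * Clean, unmapped, not-under-writeback pages are dropped; everything
 * else is quietly skipped rather than waited for.
 */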

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
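/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * direct-IO write that has just hit disk may need any now-stale cached
 * pages in the written span dropped, e.g.
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + len - 1) >> PAGE_CACHE_SHIFT);
 *
 * where a -EIO return means some page could not be invalidated (for
 * example because it was redirtied while we worked).
 */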

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
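/*
 * Typical use (a sketch of the common pattern, not code from this file):
 * a network filesystem that discovers its cached data is stale can force
 * a full re-read with
 *
 *	ret = invalidate_inode_pages2(inode->i_mapping);
 *
 * which, unlike invalidate_mapping_pages(), insists on getting rid of
 * every page and reports failure if it cannot.
 */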