/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

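/*
 * All swap-cache pages live in this single global address_space: a
 * page's index in the radix tree is its swp_entry_t value (entry.val),
 * which is why the lookups below use find_get_page(&swapper_space,
 * entry.val) rather than a per-file mapping and offset.
 */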
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

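/*
 * INC_CACHE_INFO() is a plain, unsynchronised increment: the counters
 * below are debugging statistics for show_swap_cache_info(), and losing
 * the occasional update under concurrency is considered acceptable.
 */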
static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
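/*
 * radix_tree_preload() pre-allocates radix-tree nodes with gfp_mask
 * while sleeping is still allowed; the insertion then runs under the
 * IRQ-disabled tree_lock, consuming the preloaded nodes instead of
 * calling into the allocator where it must not block.
 */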
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
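/*
 * The caller must also hold swapper_space.tree_lock for writing, as
 * delete_from_swap_cache() below does; only the radix-tree entry and
 * the accounting are undone here, not the page or swap-entry references.
 */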
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
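/*
 * (As of this version, the main caller is vmscan's pageout path, which
 * passes GFP_ATOMIC since it cannot sleep while scanning pages; a return
 * of 0 there simply means the page cannot be swapped this time around.)
 */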
int add_to_swap(struct page * page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
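/*
 * Unlike __delete_from_swap_cache(), this takes tree_lock itself and
 * also drops the references the add path took: swap_free() undoes the
 * swap_duplicate(), page_cache_release() undoes the page_cache_get().
 */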
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
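/*
 * The caller is expected to have just allocated the entry with
 * get_swap_page(), so swap_duplicate() cannot fail here - hence the
 * BUG() below.  On success the page has traded its page-cache
 * reference for a swap-cache one.
 */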
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
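/*
 * The ClearPageDirty()/set_page_dirty() pair below is not a no-op:
 * clearing first forces set_page_dirty() to run its slow path against
 * the new mapping, so the page gets tagged dirty in that mapping's
 * radix tree.
 */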
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
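/*
 * TestSetPageLocked() is only a trylock: if someone else holds the page
 * lock we simply skip the attempt, which is safe because freeing the
 * swap cache here is purely an optimisation.
 */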
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
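/*
 * Work is done in PAGEVEC_SIZE batches so that each release_pages()
 * call, which may take zone LRU locks internally, stays bounded.
 */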
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
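/*
 * On the new-page path the page is returned locked with read IO in
 * flight; the IO completion handler unlocks it, so callers typically
 * wait via lock_page().  A page found already in the cache is returned
 * unlocked, as in lookup_swap_cache().
 */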
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}