/*
 * include/linux/swap.h
 */
1 | #ifndef _LINUX_SWAP_H |
2 | #define _LINUX_SWAP_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/spinlock.h> |
5 | #include <linux/linkage.h> | |
6 | #include <linux/mmzone.h> | |
7 | #include <linux/list.h> | |
66e1707b | 8 | #include <linux/memcontrol.h> |
1da177e4 | 9 | #include <linux/sched.h> |
af936a16 | 10 | #include <linux/node.h> |
542d1c88 | 11 | |
1da177e4 LT |
12 | #include <asm/atomic.h> |
13 | #include <asm/page.h> | |
14 | ||
/* Forward declarations: only pointers to these are used in this header. */
struct notifier_block;

struct bio;
1da177e4 LT |
19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
20 | #define SWAP_FLAG_PRIO_MASK 0x7fff | |
21 | #define SWAP_FLAG_PRIO_SHIFT 0 | |
22 | ||
23 | static inline int current_is_kswapd(void) | |
24 | { | |
25 | return current->flags & PF_KSWAPD; | |
26 | } | |
27 | ||
/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
#ifndef CONFIG_MIGRATION
#define MAX_SWAPFILES		(1 << MAX_SWAPFILES_SHIFT)
#else
/* Use last two entries for page migration swap entries */
#define MAX_SWAPFILES		((1 << MAX_SWAPFILES_SHIFT)-2)
#define SWP_MIGRATION_READ	MAX_SWAPFILES
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + 1)
#endif
1da177e4 LT |
45 | |
46 | /* | |
47 | * Magic header for a swap area. The first part of the union is | |
48 | * what the swap magic looks like for the old (limited to 128MB) | |
49 | * swap area format, the second part of the union adds - in the | |
50 | * old reserved area - some extra information. Note that the first | |
51 | * kilobyte is reserved for boot loader or disk label stuff... | |
52 | * | |
53 | * Having the magic at the end of the PAGE_SIZE makes detecting swap | |
54 | * areas somewhat tricky on machines that support multiple page sizes. | |
55 | * For 2.5 we'll probably want to move the magic to just beyond the | |
56 | * bootbits... | |
57 | */ | |
58 | union swap_header { | |
59 | struct { | |
60 | char reserved[PAGE_SIZE - 10]; | |
61 | char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */ | |
62 | } magic; | |
63 | struct { | |
e8f03d02 AD |
64 | char bootbits[1024]; /* Space for disklabel etc. */ |
65 | __u32 version; | |
66 | __u32 last_page; | |
67 | __u32 nr_badpages; | |
68 | unsigned char sws_uuid[16]; | |
69 | unsigned char sws_volume[16]; | |
70 | __u32 padding[117]; | |
71 | __u32 badpages[1]; | |
1da177e4 LT |
72 | } info; |
73 | }; | |
74 | ||
/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;	/* pages of slab reclaimed so far */
};

91 | #ifdef __KERNEL__ | |
92 | ||
93 | struct address_space; | |
94 | struct sysinfo; | |
95 | struct writeback_control; | |
96 | struct zone; | |
97 | ||
98 | /* | |
99 | * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of | |
100 | * disk blocks. A list of swap extents maps the entire swapfile. (Where the | |
101 | * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart | |
102 | * from setup, they're handled identically. | |
103 | * | |
104 | * We always assume that blocks are of size PAGE_SIZE. | |
105 | */ | |
106 | struct swap_extent { | |
107 | struct list_head list; | |
108 | pgoff_t start_page; | |
109 | pgoff_t nr_pages; | |
110 | sector_t start_block; | |
111 | }; | |
112 | ||
/*
 * Max bad pages in the new format: the badpages[] array may grow until it
 * reaches the magic at the end of the page.  __swapoffset() is the classic
 * offsetof() null-pointer idiom applied to union swap_header.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
119 | ||
/* Flags for swap_info_struct::flags. */
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_ACTIVE	= (SWP_USED | SWP_WRITEOK),
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};

/* Batch size for page reclaim and swap allocation. */
#define SWAP_CLUSTER_MAX 32

#define SWAP_MAP_MAX	0x7fff		/* max reference count per swap slot */
#define SWAP_MAP_BAD	0x8000		/* marks an unusable swap slot */
133 | /* | |
134 | * The in-memory structure used to track swap areas. | |
1da177e4 LT |
135 | */ |
136 | struct swap_info_struct { | |
137 | unsigned int flags; | |
5d337b91 | 138 | int prio; /* swap priority */ |
1da177e4 LT |
139 | struct file *swap_file; |
140 | struct block_device *bdev; | |
141 | struct list_head extent_list; | |
1da177e4 LT |
142 | struct swap_extent *curr_swap_extent; |
143 | unsigned old_block_size; | |
144 | unsigned short * swap_map; | |
145 | unsigned int lowest_bit; | |
146 | unsigned int highest_bit; | |
147 | unsigned int cluster_next; | |
148 | unsigned int cluster_nr; | |
6eb396dc HD |
149 | unsigned int pages; |
150 | unsigned int max; | |
151 | unsigned int inuse_pages; | |
1da177e4 LT |
152 | int next; /* next entry on swap list */ |
153 | }; | |
154 | ||
155 | struct swap_list_t { | |
156 | int head; /* head of priority-ordered swapfile list */ | |
157 | int next; /* swapfile to be used next */ | |
158 | }; | |
159 | ||
160 | /* Swap 50% full? Release swapcache more aggressively.. */ | |
161 | #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) | |
162 | ||
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern long nr_swap_pages;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

f04e9ebb KM |
186 | /** |
187 | * lru_cache_add: add a page to the page lists | |
188 | * @page: the page to add | |
189 | */ | |
4f98a2fe | 190 | static inline void lru_cache_add_anon(struct page *page) |
f04e9ebb | 191 | { |
4f98a2fe | 192 | __lru_cache_add(page, LRU_INACTIVE_ANON); |
f04e9ebb KM |
193 | } |
194 | ||
4f98a2fe | 195 | static inline void lru_cache_add_active_anon(struct page *page) |
f04e9ebb | 196 | { |
4f98a2fe RR |
197 | __lru_cache_add(page, LRU_ACTIVE_ANON); |
198 | } | |
199 | ||
200 | static inline void lru_cache_add_file(struct page *page) | |
201 | { | |
202 | __lru_cache_add(page, LRU_INACTIVE_FILE); | |
203 | } | |
204 | ||
205 | static inline void lru_cache_add_active_file(struct page *page) | |
206 | { | |
207 | __lru_cache_add(page, LRU_ACTIVE_FILE); | |
f04e9ebb KM |
208 | } |
209 | ||
1da177e4 | 210 | /* linux/mm/vmscan.c */ |
dac1d27b | 211 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
5ad333eb | 212 | gfp_t gfp_mask); |
e1a1cd59 BS |
213 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, |
214 | gfp_t gfp_mask); | |
4f98a2fe | 215 | extern int __isolate_lru_page(struct page *page, int mode, int file); |
69e05944 | 216 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
1da177e4 | 217 | extern int vm_swappiness; |
b20a3503 | 218 | extern int remove_mapping(struct address_space *mapping, struct page *page); |
bd1e22b8 | 219 | extern long vm_total_pages; |
b20a3503 | 220 | |
9eeff239 CL |
221 | #ifdef CONFIG_NUMA |
222 | extern int zone_reclaim_mode; | |
9614634f | 223 | extern int sysctl_min_unmapped_ratio; |
0ff38490 | 224 | extern int sysctl_min_slab_ratio; |
9eeff239 CL |
225 | extern int zone_reclaim(struct zone *, gfp_t, unsigned int); |
226 | #else | |
227 | #define zone_reclaim_mode 0 | |
228 | static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) | |
229 | { | |
230 | return 0; | |
231 | } | |
232 | #endif | |
233 | ||
#ifdef CONFIG_UNEVICTABLE_LRU
extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void scan_mapping_unevictable_pages(struct address_space *);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
/* Without the unevictable LRU, every page is considered evictable. */
static inline int page_evictable(struct page *page,
						struct vm_area_struct *vma)
{
	return 1;
}

static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
{
}

static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}

static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);

#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
#endif /* CONFIG_MMU */

extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);

271 | #ifdef CONFIG_SWAP | |
272 | /* linux/mm/page_io.c */ | |
273 | extern int swap_readpage(struct file *, struct page *); | |
274 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); | |
6712ecf8 | 275 | extern void end_swap_bio_read(struct bio *bio, int err); |
1da177e4 LT |
276 | |
277 | /* linux/mm/swap_state.c */ | |
278 | extern struct address_space swapper_space; | |
279 | #define total_swapcache_pages swapper_space.nrpages | |
280 | extern void show_swap_cache_info(void); | |
ac47b003 | 281 | extern int add_to_swap(struct page *); |
73b1262f | 282 | extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); |
1da177e4 LT |
283 | extern void __delete_from_swap_cache(struct page *); |
284 | extern void delete_from_swap_cache(struct page *); | |
1da177e4 LT |
285 | extern void free_page_and_swap_cache(struct page *); |
286 | extern void free_pages_and_swap_cache(struct page **, int); | |
46017e95 | 287 | extern struct page *lookup_swap_cache(swp_entry_t); |
02098fea | 288 | extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, |
46017e95 | 289 | struct vm_area_struct *vma, unsigned long addr); |
02098fea | 290 | extern struct page *swapin_readahead(swp_entry_t, gfp_t, |
46017e95 HD |
291 | struct vm_area_struct *vma, unsigned long addr); |
292 | ||
1da177e4 LT |
293 | /* linux/mm/swapfile.c */ |
294 | extern long total_swap_pages; | |
1da177e4 LT |
295 | extern void si_swapinfo(struct sysinfo *); |
296 | extern swp_entry_t get_swap_page(void); | |
f577eb30 | 297 | extern swp_entry_t get_swap_page_of_type(int); |
1da177e4 LT |
298 | extern int swap_duplicate(swp_entry_t); |
299 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | |
300 | extern void swap_free(swp_entry_t); | |
301 | extern void free_swap_and_cache(swp_entry_t); | |
7bf23687 | 302 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
f577eb30 | 303 | extern unsigned int count_swap_pages(int, int); |
1da177e4 | 304 | extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); |
3aef83e0 | 305 | extern sector_t swapdev_block(int, pgoff_t); |
1da177e4 | 306 | extern struct swap_info_struct *get_swap_info_struct(unsigned); |
7b1fe597 | 307 | extern int reuse_swap_page(struct page *); |
a2c43eed | 308 | extern int try_to_free_swap(struct page *); |
1da177e4 LT |
309 | struct backing_dev_info; |
310 | ||
/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(void);
extern void __put_swap_token(struct mm_struct *);

/* Does @mm currently hold the (single, global) swap token? */
static inline int has_swap_token(struct mm_struct *mm)
{
	return (mm == swap_token_mm);
}

/* Release the swap token, but only if @mm actually holds it. */
static inline void put_swap_token(struct mm_struct *mm)
{
	if (has_swap_token(mm))
		__put_swap_token(mm);
}

/* Force the current holder (if any) to drop the swap token. */
static inline void disable_swap_token(void)
{
	put_swap_token(swap_token_mm);
}

1da177e4 LT |
332 | #else /* CONFIG_SWAP */ |
333 | ||
334 | #define total_swap_pages 0 | |
335 | #define total_swapcache_pages 0UL | |
336 | ||
337 | #define si_swapinfo(val) \ | |
338 | do { (val)->freeswap = (val)->totalswap = 0; } while (0) | |
9ae5b3c7 OH |
339 | /* only sparc can not include linux/pagemap.h in this file |
340 | * so leave page_cache_release and release_pages undeclared... */ | |
1da177e4 LT |
341 | #define free_page_and_swap_cache(page) \ |
342 | page_cache_release(page) | |
343 | #define free_pages_and_swap_cache(pages, nr) \ | |
344 | release_pages((pages), (nr), 0); | |
345 | ||
bd96b9eb CK |
346 | static inline void show_swap_cache_info(void) |
347 | { | |
348 | } | |
349 | ||
350 | static inline void free_swap_and_cache(swp_entry_t swp) | |
351 | { | |
352 | } | |
353 | ||
354 | static inline int swap_duplicate(swp_entry_t swp) | |
355 | { | |
356 | return 0; | |
357 | } | |
358 | ||
359 | static inline void swap_free(swp_entry_t swp) | |
360 | { | |
361 | } | |
362 | ||
02098fea | 363 | static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, |
bd96b9eb CK |
364 | struct vm_area_struct *vma, unsigned long addr) |
365 | { | |
366 | return NULL; | |
367 | } | |
368 | ||
369 | static inline struct page *lookup_swap_cache(swp_entry_t swp) | |
370 | { | |
371 | return NULL; | |
372 | } | |
373 | ||
73b1262f HD |
374 | static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, |
375 | gfp_t gfp_mask) | |
bd96b9eb | 376 | { |
73b1262f | 377 | return -1; |
bd96b9eb CK |
378 | } |
379 | ||
380 | static inline void __delete_from_swap_cache(struct page *page) | |
381 | { | |
382 | } | |
383 | ||
384 | static inline void delete_from_swap_cache(struct page *page) | |
385 | { | |
386 | } | |
387 | ||
7b1fe597 | 388 | #define reuse_swap_page(page) (page_mapcount(page) == 1) |
1da177e4 | 389 | |
a2c43eed | 390 | static inline int try_to_free_swap(struct page *page) |
68a22394 RR |
391 | { |
392 | return 0; | |
393 | } | |
394 | ||
1da177e4 LT |
395 | static inline swp_entry_t get_swap_page(void) |
396 | { | |
397 | swp_entry_t entry; | |
398 | entry.val = 0; | |
399 | return entry; | |
400 | } | |
401 | ||
402 | /* linux/mm/thrash.c */ | |
403 | #define put_swap_token(x) do { } while(0) | |
404 | #define grab_swap_token() do { } while(0) | |
405 | #define has_swap_token(x) 0 | |
f7b7fd8f | 406 | #define disable_swap_token() do { } while(0) |
1da177e4 LT |
407 | |
408 | #endif /* CONFIG_SWAP */ | |
409 | #endif /* __KERNEL__*/ | |
410 | #endif /* _LINUX_SWAP_H */ |