Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_MMZONE_H |
2 | #define _LINUX_MMZONE_H | |
3 | ||
1da177e4 | 4 | #ifndef __ASSEMBLY__ |
97965478 | 5 | #ifndef __GENERATING_BOUNDS_H |
1da177e4 | 6 | |
1da177e4 LT |
7 | #include <linux/spinlock.h> |
8 | #include <linux/list.h> | |
9 | #include <linux/wait.h> | |
e815af95 | 10 | #include <linux/bitops.h> |
1da177e4 LT |
11 | #include <linux/cache.h> |
12 | #include <linux/threads.h> | |
13 | #include <linux/numa.h> | |
14 | #include <linux/init.h> | |
bdc8cb98 | 15 | #include <linux/seqlock.h> |
8357f869 | 16 | #include <linux/nodemask.h> |
835c134e | 17 | #include <linux/pageblock-flags.h> |
bbeae5b0 | 18 | #include <linux/page-flags-layout.h> |
60063497 | 19 | #include <linux/atomic.h> |
93ff66bf | 20 | #include <asm/page.h> |
1da177e4 LT |
21 | |
22 | /* Free memory management - zoned buddy allocator. */ | |
23 | #ifndef CONFIG_FORCE_MAX_ZONEORDER | |
24 | #define MAX_ORDER 11 | |
25 | #else | |
26 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER | |
27 | #endif | |
e984bb43 | 28 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) |
1da177e4 | 29 | |
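As a point of reference, here is a minimal user-space sketch of what these limits work out to numerically; the 4 KiB page size is an assumption, and MAX_ORDER 11 is the default taken from just above:

```c
#include <stdio.h>

#define PAGE_SIZE_ASSUMED 4096UL            /* assumption: 4 KiB pages */
#define MAX_ORDER 11                        /* default from above */
#define MAX_ORDER_NR_PAGES (1UL << (MAX_ORDER - 1))

int main(void)
{
	/* The buddy allocator serves blocks of 2^order pages for
	 * order 0 .. MAX_ORDER-1, so the largest block is: */
	printf("largest buddy block: %lu pages (%lu KiB)\n",
	       MAX_ORDER_NR_PAGES,
	       MAX_ORDER_NR_PAGES * PAGE_SIZE_ASSUMED / 1024);
	return 0;
}
```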
5ad333eb AW |
30 | /* |
31 | * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed | |
32 | * costly to service. That is between allocation orders which should | |
35fca53e | 33 | * coalesce naturally under reasonable reclaim pressure and those which |
5ad333eb AW |
34 | * will not. |
35 | */ | |
36 | #define PAGE_ALLOC_COSTLY_ORDER 3 | |
37 | ||
47118af0 MN |
38 | enum { |
39 | MIGRATE_UNMOVABLE, | |
40 | MIGRATE_RECLAIMABLE, | |
41 | MIGRATE_MOVABLE, | |
42 | MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ | |
43 | MIGRATE_RESERVE = MIGRATE_PCPTYPES, | |
44 | #ifdef CONFIG_CMA | |
45 | /* | |
46 | * MIGRATE_CMA migration type is designed to mimic the way | |
47 | * ZONE_MOVABLE works. Only movable pages can be allocated | |
48 | from MIGRATE_CMA pageblocks and the page allocator never |
49 | implicitly changes the migration type of a MIGRATE_CMA pageblock. |
50 | * | |
51 | * The way to use it is to change migratetype of a range of | |
52 | * pageblocks to MIGRATE_CMA which can be done by | |
53 | * __free_pageblock_cma() function. What is important though | |
54 | * is that a range of pageblocks must be aligned to | |
55 | MAX_ORDER_NR_PAGES should the biggest page be bigger than |
56 | * a single pageblock. | |
57 | */ | |
58 | MIGRATE_CMA, | |
59 | #endif | |
194159fb | 60 | #ifdef CONFIG_MEMORY_ISOLATION |
47118af0 | 61 | MIGRATE_ISOLATE, /* can't allocate from here */ |
194159fb | 62 | #endif |
47118af0 MN |
63 | MIGRATE_TYPES |
64 | }; | |
65 | ||
66 | #ifdef CONFIG_CMA | |
67 | # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) | |
68 | #else | |
69 | # define is_migrate_cma(migratetype) false | |
70 | #endif | |
b2a0ac88 MG |
71 | |
72 | #define for_each_migratetype_order(order, type) \ | |
73 | for (order = 0; order < MAX_ORDER; order++) \ | |
74 | for (type = 0; type < MIGRATE_TYPES; type++) | |
75 | ||
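A hedged user-space sketch of how for_each_migratetype_order() expands: one free list per (order, migratetype) pair. The enum is mirrored locally with CONFIG_CMA and CONFIG_MEMORY_ISOLATION assumed off, so MIGRATE_TYPES is 4 here:

```c
#include <stdio.h>

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
	MIGRATE_TYPES                   /* 4 with CMA/ISOLATION off */
};

#define MAX_ORDER 11

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

int main(void)
{
	int order, type, lists = 0;

	/* one free list exists per (order, migratetype) pair */
	for_each_migratetype_order(order, type)
		lists++;

	printf("free lists walked: %d\n", lists);   /* 11 * 4 = 44 */
	return 0;
}
```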
467c996c MG |
76 | extern int page_group_by_mobility_disabled; |
77 | ||
78 | static inline int get_pageblock_migratetype(struct page *page) | |
79 | { | |
467c996c MG |
80 | return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); |
81 | } | |
82 | ||
1da177e4 | 83 | struct free_area { |
b2a0ac88 | 84 | struct list_head free_list[MIGRATE_TYPES]; |
1da177e4 LT |
85 | unsigned long nr_free; |
86 | }; | |
87 | ||
88 | struct pglist_data; | |
89 | ||
90 | /* | |
91 | * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. | |
92 | * So add a wild amount of padding here to ensure that they fall into separate | |
93 | * cachelines. There are very few zone structures in the machine, so space | |
94 | * consumption is not a concern here. | |
95 | */ | |
96 | #if defined(CONFIG_SMP) | |
97 | struct zone_padding { | |
98 | char x[0]; | |
22fc6ecc | 99 | } ____cacheline_internodealigned_in_smp; |
1da177e4 LT |
100 | #define ZONE_PADDING(name) struct zone_padding name; |
101 | #else | |
102 | #define ZONE_PADDING(name) | |
103 | #endif | |
104 | ||
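A small user-space illustration of the same trick ZONE_PADDING uses (GCC/Clang extensions and a 64-byte cacheline are assumed here): a zero-size, cacheline-aligned member pushes the fields that follow it onto a separate cacheline:

```c
#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64	/* assumption: 64-byte internode cacheline */

/* Same idea as ZONE_PADDING: a zero-size, cacheline-aligned member
 * forces whatever follows it onto a fresh cacheline. */
struct demo {
	long hot_for_allocator;
	struct { char x[0]; } __attribute__((aligned(CACHELINE))) _pad1_;
	long hot_for_reclaim;
};

int main(void)
{
	printf("hot_for_allocator at %zu, hot_for_reclaim at %zu\n",
	       offsetof(struct demo, hot_for_allocator),
	       offsetof(struct demo, hot_for_reclaim));	/* 0 and 64 */
	return 0;
}
```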
2244b95a | 105 | enum zone_stat_item { |
51ed4491 | 106 | /* First 128 byte cacheline (assuming 64 bit words) */ |
d23ad423 | 107 | NR_FREE_PAGES, |
81c0a2bb | 108 | NR_ALLOC_BATCH, |
b69408e8 | 109 | NR_LRU_BASE, |
4f98a2fe RR |
110 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ |
111 | NR_ACTIVE_ANON, /* " " " " " */ | |
112 | NR_INACTIVE_FILE, /* " " " " " */ | |
113 | NR_ACTIVE_FILE, /* " " " " " */ | |
894bc310 | 114 | NR_UNEVICTABLE, /* " " " " " */ |
5344b7e6 | 115 | NR_MLOCK, /* mlock()ed pages found and moved off LRU */ |
f3dbd344 CL |
116 | NR_ANON_PAGES, /* Mapped anonymous pages */ |
117 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. | |
65ba55f5 | 118 | only modified from process context */ |
347ce434 | 119 | NR_FILE_PAGES, |
b1e7a8fd | 120 | NR_FILE_DIRTY, |
ce866b34 | 121 | NR_WRITEBACK, |
51ed4491 CL |
122 | NR_SLAB_RECLAIMABLE, |
123 | NR_SLAB_UNRECLAIMABLE, | |
124 | NR_PAGETABLE, /* used for pagetables */ | |
c6a7f572 KM |
125 | NR_KERNEL_STACK, |
126 | /* Second 128 byte cacheline */ | |
fd39fc85 | 127 | NR_UNSTABLE_NFS, /* NFS unstable pages */ |
d2c5e30c | 128 | NR_BOUNCE, |
e129b5c2 | 129 | NR_VMSCAN_WRITE, |
49ea7eb6 | 130 | NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ |
fc3ba692 | 131 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ |
a731286d KM |
132 | NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ |
133 | NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ | |
4b02108a | 134 | NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */ |
ea941f0e MR |
135 | NR_DIRTIED, /* page dirtyings since bootup */ |
136 | NR_WRITTEN, /* page writings since bootup */ | |
ca889e6c CL |
137 | #ifdef CONFIG_NUMA |
138 | NUMA_HIT, /* allocated in intended node */ | |
139 | NUMA_MISS, /* allocated in non intended node */ | |
140 | NUMA_FOREIGN, /* was intended here, hit elsewhere */ | |
141 | NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ | |
142 | NUMA_LOCAL, /* allocation from local node */ | |
143 | NUMA_OTHER, /* allocation from other node */ | |
144 | #endif | |
a528910e JW |
145 | WORKINGSET_REFAULT, |
146 | WORKINGSET_ACTIVATE, | |
79134171 | 147 | NR_ANON_TRANSPARENT_HUGEPAGES, |
d1ce749a | 148 | NR_FREE_CMA_PAGES, |
2244b95a CL |
149 | NR_VM_ZONE_STAT_ITEMS }; |
150 | ||
4f98a2fe RR |
151 | /* |
152 | * We do arithmetic on the LRU lists in various places in the code, | |
153 | * so it is important to keep the active lists LRU_ACTIVE higher in | |
154 | * the array than the corresponding inactive lists, and to keep | |
155 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. | |
156 | * | |
157 | * This has to be kept in sync with the statistics in zone_stat_item | |
158 | * above and the descriptions in vmstat_text in mm/vmstat.c | |
159 | */ | |
160 | #define LRU_BASE 0 | |
161 | #define LRU_ACTIVE 1 | |
162 | #define LRU_FILE 2 | |
163 | ||
b69408e8 | 164 | enum lru_list { |
4f98a2fe RR |
165 | LRU_INACTIVE_ANON = LRU_BASE, |
166 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, | |
167 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, | |
168 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, | |
894bc310 | 169 | LRU_UNEVICTABLE, |
894bc310 LS |
170 | NR_LRU_LISTS |
171 | }; | |
b69408e8 | 172 | |
4111304d | 173 | #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) |
b69408e8 | 174 | |
4111304d | 175 | #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) |
894bc310 | 176 | |
4111304d | 177 | static inline int is_file_lru(enum lru_list lru) |
4f98a2fe | 178 | { |
4111304d | 179 | return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); |
4f98a2fe RR |
180 | } |
181 | ||
4111304d | 182 | static inline int is_active_lru(enum lru_list lru) |
b69408e8 | 183 | { |
4111304d | 184 | return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); |
b69408e8 CL |
185 | } |
186 | ||
4111304d | 187 | static inline int is_unevictable_lru(enum lru_list lru) |
894bc310 | 188 | { |
4111304d | 189 | return (lru == LRU_UNEVICTABLE); |
894bc310 LS |
190 | } |
191 | ||
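A quick user-space check of the index arithmetic described in the comment above the enum: active lists sit LRU_ACTIVE above their inactive counterparts and file lists sit LRU_FILE above the anon ones, so list indices can be composed additively. The enum is mirrored locally for the demo:

```c
#include <stdio.h>

#define LRU_BASE   0
#define LRU_ACTIVE 1
#define LRU_FILE   2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

int main(void)
{
	/* activating a page moves its list index up by LRU_ACTIVE;
	 * file lists differ from anon lists by LRU_FILE */
	enum lru_list lru = LRU_INACTIVE_FILE;

	printf("inactive file=%d, its active list=%d (LRU_ACTIVE_FILE=%d)\n",
	       lru, lru + LRU_ACTIVE, LRU_ACTIVE_FILE);
	printf("NR_LRU_LISTS=%d\n", NR_LRU_LISTS);	/* 5 */
	return 0;
}
```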
89abfab1 HD |
192 | struct zone_reclaim_stat { |
193 | /* | |
194 | * The pageout code in vmscan.c keeps track of how many of the | |
59f91e5d | 195 | * mem/swap backed and file backed pages are referenced. |
89abfab1 HD |
196 | * The higher the rotated/scanned ratio, the more valuable |
197 | * that cache is. | |
198 | * | |
199 | * The anon LRU stats live in [0], file LRU stats in [1] | |
200 | */ | |
201 | unsigned long recent_rotated[2]; | |
202 | unsigned long recent_scanned[2]; | |
203 | }; | |
204 | ||
6290df54 JW |
205 | struct lruvec { |
206 | struct list_head lists[NR_LRU_LISTS]; | |
89abfab1 | 207 | struct zone_reclaim_stat reclaim_stat; |
c255a458 | 208 | #ifdef CONFIG_MEMCG |
7f5e86c2 KK |
209 | struct zone *zone; |
210 | #endif | |
6290df54 JW |
211 | }; |
212 | ||
bb2a0de9 KH |
213 | /* Mask used when gathering information at once (see memcontrol.c) */ |
214 | #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) | |
215 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) | |
bb2a0de9 KH |
216 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) |
217 | ||
39deaf85 | 218 | /* Isolate clean file */ |
f3fd4a61 | 219 | #define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) |
f80c0673 | 220 | /* Isolate unmapped file */ |
f3fd4a61 | 221 | #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) |
c8244935 | 222 | /* Isolate for asynchronous migration */ |
f3fd4a61 | 223 | #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) |
e46a2879 MK |
224 | /* Isolate unevictable pages */ |
225 | #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) | |
4356f21d MK |
226 | |
227 | /* LRU Isolation modes. */ | |
228 | typedef unsigned __bitwise__ isolate_mode_t; | |
229 | ||
41858966 MG |
230 | enum zone_watermarks { |
231 | WMARK_MIN, | |
232 | WMARK_LOW, | |
233 | WMARK_HIGH, | |
234 | NR_WMARK | |
235 | }; | |
236 | ||
237 | #define min_wmark_pages(z) (z->watermark[WMARK_MIN]) | |
238 | #define low_wmark_pages(z) (z->watermark[WMARK_LOW]) | |
239 | #define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) | |
240 | ||
1da177e4 LT |
241 | struct per_cpu_pages { |
242 | int count; /* number of pages in the list */ | |
1da177e4 LT |
243 | int high; /* high watermark, emptying needed */ |
244 | int batch; /* chunk size for buddy add/remove */ | |
5f8dcc21 MG |
245 | |
246 | /* Lists of pages, one per migrate type stored on the pcp-lists */ | |
247 | struct list_head lists[MIGRATE_PCPTYPES]; | |
1da177e4 LT |
248 | }; |
249 | ||
250 | struct per_cpu_pageset { | |
3dfa5721 | 251 | struct per_cpu_pages pcp; |
4037d452 CL |
252 | #ifdef CONFIG_NUMA |
253 | s8 expire; | |
254 | #endif | |
2244b95a | 255 | #ifdef CONFIG_SMP |
df9ecaba | 256 | s8 stat_threshold; |
2244b95a CL |
257 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; |
258 | #endif | |
99dcc3e5 | 259 | }; |
e7c8d5c9 | 260 | |
97965478 CL |
261 | #endif /* !__GENERATING_BOUNDS_H */ |
262 | ||
2f1b6248 | 263 | enum zone_type { |
4b51d669 | 264 | #ifdef CONFIG_ZONE_DMA |
2f1b6248 CL |
265 | /* |
266 | * ZONE_DMA is used when there are devices that are not able | |
267 | * to do DMA to all of addressable memory (ZONE_NORMAL). Then we | |
268 | * carve out the portion of memory that is needed for these devices. | |
269 | * The range is arch specific. | |
270 | * | |
271 | * Some examples | |
272 | * | |
273 | * Architecture Limit | |
274 | * --------------------------- | |
275 | * parisc, ia64, sparc <4G | |
276 | * s390 <2G | |
2f1b6248 CL |
277 | * arm Various |
278 | * alpha Unlimited or 0-16MB. | |
279 | * | |
280 | * i386, x86_64 and multiple other arches | |
281 | * <16M. | |
282 | */ | |
283 | ZONE_DMA, | |
4b51d669 | 284 | #endif |
fb0e7942 | 285 | #ifdef CONFIG_ZONE_DMA32 |
2f1b6248 CL |
286 | /* |
287 | * x86_64 needs two ZONE_DMAs because it supports devices that are | |
288 | * only able to do DMA to the lower 16M but also 32 bit devices that | |
289 | * can only do DMA areas below 4G. | |
290 | */ | |
291 | ZONE_DMA32, | |
fb0e7942 | 292 | #endif |
2f1b6248 CL |
293 | /* |
294 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be | |
295 | * performed on pages in ZONE_NORMAL if the DMA devices support | |
296 | * transfers to all addressable memory. | |
297 | */ | |
298 | ZONE_NORMAL, | |
e53ef38d | 299 | #ifdef CONFIG_HIGHMEM |
2f1b6248 CL |
300 | /* |
301 | * A memory area that is only addressable by the kernel through | |
302 | * mapping portions into its own address space. This is for example | |
303 | * used by i386 to allow the kernel to address the memory beyond | |
304 | * 900MB. The kernel will set up special mappings (page | |
305 | * table entries on i386) for each page that the kernel needs to | |
306 | * access. | |
307 | */ | |
308 | ZONE_HIGHMEM, | |
e53ef38d | 309 | #endif |
2a1e274a | 310 | ZONE_MOVABLE, |
97965478 | 311 | __MAX_NR_ZONES |
2f1b6248 | 312 | }; |
1da177e4 | 313 | |
97965478 CL |
314 | #ifndef __GENERATING_BOUNDS_H |
315 | ||
1da177e4 LT |
316 | struct zone { |
317 | /* Fields commonly accessed by the page allocator */ | |
41858966 MG |
318 | |
319 | /* zone watermarks, access with *_wmark_pages(zone) macros */ | |
320 | unsigned long watermark[NR_WMARK]; | |
321 | ||
aa454840 CL |
322 | /* |
323 | * When free pages are below this point, additional steps are taken | |
324 | * when reading the number of free pages to avoid per-cpu counter | |
325 | * drift allowing watermarks to be breached | |
326 | */ | |
327 | unsigned long percpu_drift_mark; | |
328 | ||
1da177e4 LT |
329 | /* |
330 | * We don't know whether the memory that we're going to allocate will be
331 | * freeable, or whether it will eventually be released, so to avoid wasting
332 | * several GB of ram we must reserve some of the lower zone memory (otherwise
333 | * we risk running OOM on the lower zones despite there being tons of freeable
334 | * ram on the higher zones). This array is recalculated at runtime if the
335 | * sysctl_lowmem_reserve_ratio sysctl changes.
336 | */ | |
337 | unsigned long lowmem_reserve[MAX_NR_ZONES]; | |
338 | ||
ab8fabd4 JW |
339 | /* |
340 | * This is a per-zone reserve of pages that should not be | |
341 | * considered dirtyable memory. | |
342 | */ | |
343 | unsigned long dirty_balance_reserve; | |
344 | ||
e7c8d5c9 | 345 | #ifdef CONFIG_NUMA |
d5f541ed | 346 | int node; |
9614634f CL |
347 | /* |
348 | * zone reclaim becomes active if more unmapped pages exist. | |
349 | */ | |
8417bba4 | 350 | unsigned long min_unmapped_pages; |
0ff38490 | 351 | unsigned long min_slab_pages; |
e7c8d5c9 | 352 | #endif |
43cf38eb | 353 | struct per_cpu_pageset __percpu *pageset; |
1da177e4 LT |
354 | /* |
355 | * free areas of different sizes | |
356 | */ | |
357 | spinlock_t lock; | |
bb13ffeb | 358 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
62997027 MG |
359 | /* Set to true when the PG_migrate_skip bits should be cleared */ |
360 | bool compact_blockskip_flush; | |
c89511ab MG |
361 | |
362 | /* pfns where compaction scanners should start */ | |
363 | unsigned long compact_cached_free_pfn; | |
364 | unsigned long compact_cached_migrate_pfn; | |
bb13ffeb | 365 | #endif |
bdc8cb98 DH |
366 | #ifdef CONFIG_MEMORY_HOTPLUG |
367 | /* see spanned/present_pages for more description */ | |
368 | seqlock_t span_seqlock; | |
369 | #endif | |
1da177e4 LT |
370 | struct free_area free_area[MAX_ORDER]; |
371 | ||
835c134e MG |
372 | #ifndef CONFIG_SPARSEMEM |
373 | /* | |
d9c23400 | 374 | * Flags for a pageblock_nr_pages block. See pageblock-flags.h. |
835c134e MG |
375 | * In SPARSEMEM, this map is stored in struct mem_section |
376 | */ | |
377 | unsigned long *pageblock_flags; | |
378 | #endif /* CONFIG_SPARSEMEM */ | |
379 | ||
4f92e258 MG |
380 | #ifdef CONFIG_COMPACTION |
381 | /* | |
382 | * On compaction failure, 1<<compact_defer_shift compactions | |
383 | * are skipped before trying again. The number attempted since | |
384 | * last failure is tracked with compact_considered. | |
385 | */ | |
386 | unsigned int compact_considered; | |
387 | unsigned int compact_defer_shift; | |
aff62249 | 388 | int compact_order_failed; |
4f92e258 | 389 | #endif |
1da177e4 LT |
390 | |
391 | ZONE_PADDING(_pad1_) | |
392 | ||
393 | /* Fields commonly accessed by the page reclaim scanner */ | |
6290df54 JW |
394 | spinlock_t lru_lock; |
395 | struct lruvec lruvec; | |
4f98a2fe | 396 | |
a528910e JW |
397 | /* Evictions & activations on the inactive file list */ |
398 | atomic_long_t inactive_age; | |
399 | ||
1da177e4 | 400 | unsigned long pages_scanned; /* since last reclaim */ |
e815af95 | 401 | unsigned long flags; /* zone flags, see below */ |
753ee728 | 402 | |
2244b95a CL |
403 | /* Zone statistics */ |
404 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | |
9eeff239 | 405 | |
556adecb RR |
406 | /* |
407 | * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on | |
408 | * this zone's LRU. Maintained by the pageout code. | |
409 | */ | |
410 | unsigned int inactive_ratio; | |
411 | ||
1da177e4 LT |
412 | |
413 | ZONE_PADDING(_pad2_) | |
414 | /* Rarely used or read-mostly fields */ | |
415 | ||
416 | /* | |
417 | * wait_table -- the array holding the hash table | |
02b694de | 418 | * wait_table_hash_nr_entries -- the size of the hash table array |
1da177e4 LT |
419 | * wait_table_bits -- wait_table_size == (1 << wait_table_bits) |
420 | * | |
421 | * The purpose of all these is to keep track of the people | |
422 | * waiting for a page to become available and make them | |
423 | * runnable again when possible. The trouble is that this | |
424 | * consumes a lot of space, especially when so few things | |
425 | * wait on pages at a given time. So instead of using | |
426 | * per-page waitqueues, we use a waitqueue hash table. | |
427 | * | |
428 | * The bucket discipline is to sleep on the same queue when | |
429 | * colliding and wake all in that wait queue when removing. | |
430 | * When something wakes, it must check to be sure its page is | |
431 | * truly available, a la thundering herd. The cost of a | |
432 | * collision is great, but given the expected load of the | |
433 | * table, they should be so rare as to be outweighed by the | |
434 | * benefits from the saved space. | |
435 | * | |
436 | * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the | |
437 | * primary users of these fields, and in mm/page_alloc.c | |
438 | * free_area_init_core() performs the initialization of them. | |
439 | */ | |
440 | wait_queue_head_t * wait_table; | |
02b694de | 441 | unsigned long wait_table_hash_nr_entries; |
1da177e4 LT |
442 | unsigned long wait_table_bits; |
443 | ||
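A hedged user-space sketch of the bucket selection the comment describes; bucket_of() is a hypothetical stand-in for the kernel's pointer hash (the real lookup lives in mm/filemap.c), and the point here is only the masking down to wait_table_bits bits:

```c
#include <stdio.h>
#include <stdint.h>

/* bucket_of() is a hypothetical stand-in for the kernel's pointer hash;
 * the part that mirrors the comment above is the masking down to
 * (1 << wait_table_bits) - 1 buckets. */
static unsigned long bucket_of(const void *page, unsigned long wait_table_bits)
{
	return (((uintptr_t)page >> 4) * 0x9e370001UL) &
	       ((1UL << wait_table_bits) - 1);
}

int main(void)
{
	unsigned long wait_table_bits = 8;	/* assumed: 256 wait queues */
	int page_a, page_b;			/* any two distinct addresses */

	printf("page A -> bucket %lu, page B -> bucket %lu (of %lu)\n",
	       bucket_of(&page_a, wait_table_bits),
	       bucket_of(&page_b, wait_table_bits),
	       1UL << wait_table_bits);
	return 0;
}
```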
444 | /* | |
445 | * Discontig memory support fields. | |
446 | */ | |
447 | struct pglist_data *zone_pgdat; | |
1da177e4 LT |
448 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
449 | unsigned long zone_start_pfn; | |
450 | ||
bdc8cb98 | 451 | /* |
9feedc9d JL |
452 | * spanned_pages is the total pages spanned by the zone, including |
453 | * holes, which is calculated as: | |
454 | * spanned_pages = zone_end_pfn - zone_start_pfn; | |
bdc8cb98 | 455 | * |
9feedc9d JL |
456 | * present_pages is physical pages existing within the zone, which |
457 | * is calculated as: | |
8761e31c | 458 | * present_pages = spanned_pages - absent_pages(pages in holes); |
9feedc9d JL |
459 | * |
460 | * managed_pages is present pages managed by the buddy system, which | |
461 | * is calculated as (reserved_pages includes pages allocated by the | |
462 | * bootmem allocator): | |
463 | * managed_pages = present_pages - reserved_pages; | |
464 | * | |
465 | * So present_pages may be used by memory hotplug or memory power | |
466 | * management logic to figure out unmanaged pages by checking | |
467 | * (present_pages - managed_pages). And managed_pages should be used | |
468 | * by page allocator and vm scanner to calculate all kinds of watermarks | |
469 | * and thresholds. | |
470 | * | |
471 | * Locking rules: | |
472 | * | |
473 | * zone_start_pfn and spanned_pages are protected by span_seqlock. | |
474 | * It is a seqlock because it has to be read outside of zone->lock, | |
475 | * and it is done in the main allocator path. But, it is written | |
476 | * quite infrequently. | |
477 | * | |
478 | * The span_seq lock is declared along with zone->lock because it is | |
bdc8cb98 DH |
479 | * frequently read in proximity to zone->lock. It's good to |
480 | * give them a chance of being in the same cacheline. | |
9feedc9d | 481 | * |
c3d5f5f0 JL |
482 | * Write access to present_pages at runtime should be protected by |
483 | * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't | |
484 | tolerate drift of present_pages should hold the memory hotplug lock to
485 | * get a stable value. | |
486 | * | |
487 | * Read access to managed_pages should be safe because it's unsigned | |
488 | long. Write access to zone->managed_pages and totalram_pages is
489 | protected by managed_page_count_lock at runtime. Ideally only
490 | * adjust_managed_page_count() should be used instead of directly | |
491 | * touching zone->managed_pages and totalram_pages. | |
bdc8cb98 | 492 | */ |
9feedc9d JL |
493 | unsigned long spanned_pages; |
494 | unsigned long present_pages; | |
495 | unsigned long managed_pages; | |
1da177e4 | 496 | |
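A small user-space illustration of the bookkeeping in the comment above, with invented numbers for the pfn range, holes and reserved pages:

```c
#include <stdio.h>

int main(void)
{
	/* invented zone layout, just to exercise the formulas above */
	unsigned long zone_start_pfn = 0x10000;	/* assumed start pfn */
	unsigned long zone_end_pfn   = 0x40000;	/* assumed end pfn */
	unsigned long absent_pages   = 0x2000;	/* pfns falling in holes */
	unsigned long reserved_pages = 0x0800;	/* e.g. bootmem allocations */

	unsigned long spanned_pages = zone_end_pfn - zone_start_pfn;
	unsigned long present_pages = spanned_pages - absent_pages;
	unsigned long managed_pages = present_pages - reserved_pages;

	printf("spanned=%lu present=%lu managed=%lu\n",
	       spanned_pages, present_pages, managed_pages);
	return 0;
}
```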
943dca1a YI |
497 | /* |
498 | * Number of MIGRATE_RESERVE pageblocks. Maintained purely as an
499 | * optimization. Protected by zone->lock.
500 | */ | |
501 | int nr_migrate_reserve_block; | |
502 | ||
1da177e4 LT |
503 | /* |
504 | * rarely used fields: | |
505 | */ | |
15ad7cdc | 506 | const char *name; |
22fc6ecc | 507 | } ____cacheline_internodealigned_in_smp; |
1da177e4 | 508 | |
e815af95 | 509 | typedef enum { |
e815af95 | 510 | ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
098d7f12 | 511 | ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ |
0e093d99 MG |
512 | ZONE_CONGESTED, /* zone has many dirty pages backed by |
513 | * a congested BDI | |
514 | */ | |
d43006d5 MG |
515 | ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found |
516 | * many dirty file pages at the tail | |
517 | * of the LRU. | |
518 | */ | |
283aba9f MG |
519 | ZONE_WRITEBACK, /* reclaim scanning has recently found |
520 | * many pages under writeback | |
521 | */ | |
e815af95 DR |
522 | } zone_flags_t; |
523 | ||
524 | static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) | |
525 | { | |
526 | set_bit(flag, &zone->flags); | |
527 | } | |
d773ed6b DR |
528 | |
529 | static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) | |
530 | { | |
531 | return test_and_set_bit(flag, &zone->flags); | |
532 | } | |
533 | ||
e815af95 DR |
534 | static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) |
535 | { | |
536 | clear_bit(flag, &zone->flags); | |
537 | } | |
538 | ||
0e093d99 MG |
539 | static inline int zone_is_reclaim_congested(const struct zone *zone) |
540 | { | |
541 | return test_bit(ZONE_CONGESTED, &zone->flags); | |
542 | } | |
543 | ||
d43006d5 MG |
544 | static inline int zone_is_reclaim_dirty(const struct zone *zone) |
545 | { | |
546 | return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags); | |
547 | } | |
548 | ||
283aba9f MG |
549 | static inline int zone_is_reclaim_writeback(const struct zone *zone) |
550 | { | |
551 | return test_bit(ZONE_WRITEBACK, &zone->flags); | |
552 | } | |
553 | ||
e815af95 DR |
554 | static inline int zone_is_reclaim_locked(const struct zone *zone) |
555 | { | |
556 | return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); | |
557 | } | |
d773ed6b | 558 | |
098d7f12 DR |
559 | static inline int zone_is_oom_locked(const struct zone *zone) |
560 | { | |
561 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | |
562 | } | |
e815af95 | 563 | |
f9228b20 | 564 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
108bcc96 CS |
565 | { |
566 | return zone->zone_start_pfn + zone->spanned_pages; | |
567 | } | |
568 | ||
569 | static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) | |
570 | { | |
571 | return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); | |
572 | } | |
573 | ||
2a6e3ebe CS |
574 | static inline bool zone_is_initialized(struct zone *zone) |
575 | { | |
576 | return !!zone->wait_table; | |
577 | } | |
578 | ||
579 | static inline bool zone_is_empty(struct zone *zone) | |
580 | { | |
581 | return zone->spanned_pages == 0; | |
582 | } | |
583 | ||
1da177e4 LT |
584 | /* |
585 | * The "priority" of VM scanning is how much of the queues we will scan in one | |
586 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | |
587 | * queues ("queue_length >> 12") during an aging round. | |
588 | */ | |
589 | #define DEF_PRIORITY 12 | |
590 | ||
9276b1bc PJ |
591 | /* Maximum number of zones on a zonelist */ |
592 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) | |
593 | ||
594 | #ifdef CONFIG_NUMA | |
523b9458 CL |
595 | |
596 | /* | |
25a64ec1 | 597 | * The NUMA zonelists are doubled because we need zonelists that restrict the |
e97ca8e5 | 598 | * allocations to a single node for __GFP_THISNODE. |
523b9458 | 599 | * |
54a6eb5c | 600 | * [0] : Zonelist with fallback |
e97ca8e5 | 601 | * [1] : No fallback (__GFP_THISNODE) |
523b9458 | 602 | */ |
54a6eb5c | 603 | #define MAX_ZONELISTS 2 |
523b9458 CL |
604 | |
605 | ||
9276b1bc PJ |
606 | /* |
607 | * We cache key information from each zonelist for smaller cache | |
608 | * footprint when scanning for free pages in get_page_from_freelist(). | |
609 | * | |
610 | * 1) The BITMAP fullzones tracks which zones in a zonelist have come | |
611 | * up short of free memory since the last time (last_fullzone_zap) | |
612 | * we zero'd fullzones. | |
613 | * 2) The array z_to_n[] maps each zone in the zonelist to its node | |
614 | * id, so that we can efficiently evaluate whether that node is | |
615 | * set in the current tasks mems_allowed. | |
616 | * | |
617 | * Both fullzones and z_to_n[] are one-to-one with the zonelist, | |
618 | * indexed by a zones offset in the zonelist zones[] array. | |
619 | * | |
620 | * The get_page_from_freelist() routine does two scans. During the | |
621 | * first scan, we skip zones whose corresponding bit in 'fullzones' | |
622 | * is set or whose corresponding node in current->mems_allowed (which | |
623 | * comes from cpusets) is not set. During the second scan, we bypass | |
624 | * this zonelist_cache, to ensure we look methodically at each zone. | |
625 | * | |
626 | * Once per second, we zero out (zap) fullzones, forcing us to | |
627 | * reconsider nodes that might have regained more free memory. | |
628 | * The field last_full_zap is the time we last zapped fullzones. | |
629 | * | |
630 | * This mechanism reduces the amount of time we waste repeatedly | |
631 | * re-examining zones for free memory when they came up low on
632 | * memory only moments ago.
633 | * | |
634 | * The zonelist_cache struct members logically belong in struct | |
635 | * zonelist. However, the mempolicy zonelists constructed for | |
636 | * MPOL_BIND are intentionally variable length (and usually much | |
637 | * shorter). A general purpose mechanism for handling structs with | |
638 | * multiple variable length members is more mechanism than we want | |
639 | * here. We resort to some special case hackery instead. | |
640 | * | |
641 | * The MPOL_BIND zonelists don't need this zonelist_cache (in good | |
642 | * part because they are shorter), so we put the fixed length stuff | |
643 | * at the front of the zonelist struct, ending in a variable length | |
644 | * zones[], as is needed by MPOL_BIND. | |
645 | * | |
646 | * Then we put the optional zonelist cache on the end of the zonelist | |
647 | * struct. This optional stuff is found by a 'zlcache_ptr' pointer in | |
648 | * the fixed length portion at the front of the struct. This pointer | |
649 | * both enables us to find the zonelist cache, and in the case of | |
650 | * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) | |
651 | * to know that the zonelist cache is not there. | |
652 | * | |
653 | * The end result is that struct zonelists come in two flavors: | |
654 | * 1) The full, fixed length version, shown below, and | |
655 | * 2) The custom zonelists for MPOL_BIND. | |
656 | * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. | |
657 | * | |
658 | * Even though there may be multiple CPU cores on a node modifying | |
659 | * fullzones or last_full_zap in the same zonelist_cache at the same | |
660 | * time, we don't lock it. This is just hint data - if it is wrong now | |
661 | * and then, the allocator will still function, perhaps a bit slower. | |
662 | */ | |
663 | ||
664 | ||
665 | struct zonelist_cache { | |
9276b1bc | 666 | unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ |
7253f4ef | 667 | DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ |
9276b1bc PJ |
668 | unsigned long last_full_zap; /* when last zap'd (jiffies) */ |
669 | }; | |
670 | #else | |
54a6eb5c | 671 | #define MAX_ZONELISTS 1 |
9276b1bc PJ |
672 | struct zonelist_cache; |
673 | #endif | |
674 | ||
dd1a239f MG |
675 | /* |
676 | * This struct contains information about a zone in a zonelist. It is stored | |
677 | * here to avoid dereferences into large structures and lookups of tables | |
678 | */ | |
679 | struct zoneref { | |
680 | struct zone *zone; /* Pointer to actual zone */ | |
681 | int zone_idx; /* zone_idx(zoneref->zone) */ | |
682 | }; | |
683 | ||
1da177e4 LT |
684 | /* |
685 | * One allocation request operates on a zonelist. A zonelist | |
686 | * is a list of zones, the first one is the 'goal' of the | |
687 | * allocation, the other zones are fallback zones, in decreasing | |
688 | * priority. | |
689 | * | |
9276b1bc PJ |
690 | * If zlcache_ptr is not NULL, then it is just the address of zlcache, |
691 | * as explained above. If zlcache_ptr is NULL, there is no zlcache. | |
dd1a239f MG |
692 | *
693 | * To speed the reading of the zonelist, the zonerefs contain the zone index | |
694 | * of the entry being read. Helper functions to access information given | |
695 | * a struct zoneref are | |
696 | * | |
697 | * zonelist_zone() - Return the struct zone * for an entry in _zonerefs | |
698 | * zonelist_zone_idx() - Return the index of the zone for an entry | |
699 | * zonelist_node_idx() - Return the index of the node for an entry | |
1da177e4 LT |
700 | */ |
701 | struct zonelist { | |
9276b1bc | 702 | struct zonelist_cache *zlcache_ptr; // NULL or &zlcache |
dd1a239f | 703 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; |
9276b1bc PJ |
704 | #ifdef CONFIG_NUMA |
705 | struct zonelist_cache zlcache; // optional ... | |
706 | #endif | |
1da177e4 LT |
707 | }; |
708 | ||
0ee332c1 | 709 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
c713216d MG |
710 | struct node_active_region { |
711 | unsigned long start_pfn; | |
712 | unsigned long end_pfn; | |
713 | int nid; | |
714 | }; | |
0ee332c1 | 715 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
1da177e4 | 716 | |
5b99cd0e HC |
717 | #ifndef CONFIG_DISCONTIGMEM |
718 | /* The array of struct pages - for discontigmem use pgdat->lmem_map */ | |
719 | extern struct page *mem_map; | |
720 | #endif | |
721 | ||
1da177e4 LT |
722 | /* |
723 | * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM | |
724 | * (mostly NUMA machines?) to denote a higher-level memory zone than the | |
725 | * zone denotes. | |
726 | * | |
727 | * On NUMA machines, each NUMA node would have a pg_data_t to describe | |
728 | * its memory layout.
729 | * | |
730 | * Memory statistics and page replacement data structures are maintained on a | |
731 | * per-zone basis. | |
732 | */ | |
733 | struct bootmem_data; | |
734 | typedef struct pglist_data { | |
735 | struct zone node_zones[MAX_NR_ZONES]; | |
523b9458 | 736 | struct zonelist node_zonelists[MAX_ZONELISTS]; |
1da177e4 | 737 | int nr_zones; |
52d4b9ac | 738 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ |
1da177e4 | 739 | struct page *node_mem_map; |
c255a458 | 740 | #ifdef CONFIG_MEMCG |
52d4b9ac KH |
741 | struct page_cgroup *node_page_cgroup; |
742 | #endif | |
d41dee36 | 743 | #endif |
08677214 | 744 | #ifndef CONFIG_NO_BOOTMEM |
1da177e4 | 745 | struct bootmem_data *bdata; |
08677214 | 746 | #endif |
208d54e5 DH |
747 | #ifdef CONFIG_MEMORY_HOTPLUG |
748 | /* | |
749 | * Must be held any time you expect node_start_pfn, node_present_pages | |
750 | * or node_spanned_pages stay constant. Holding this will also | |
751 | * guarantee that any pfn_valid() stays that way. | |
752 | * | |
114d4b79 CS |
753 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to |
754 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG. | |
755 | * | |
72c3b51b | 756 | * Nests above zone->lock and zone->span_seqlock |
208d54e5 DH |
757 | */ |
758 | spinlock_t node_size_lock; | |
759 | #endif | |
1da177e4 LT |
760 | unsigned long node_start_pfn; |
761 | unsigned long node_present_pages; /* total number of physical pages */ | |
762 | unsigned long node_spanned_pages; /* total size of physical page | |
763 | range, including holes */ | |
764 | int node_id; | |
957f822a | 765 | nodemask_t reclaim_nodes; /* Nodes allowed to reclaim from */ |
1da177e4 | 766 | wait_queue_head_t kswapd_wait; |
5515061d | 767 | wait_queue_head_t pfmemalloc_wait; |
d8adde17 | 768 | struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ |
1da177e4 | 769 | int kswapd_max_order; |
99504748 | 770 | enum zone_type classzone_idx; |
8177a420 | 771 | #ifdef CONFIG_NUMA_BALANCING |
1c5e9c27 | 772 | /* Lock serializing the migrate rate limiting window */ |
8177a420 AA |
773 | spinlock_t numabalancing_migrate_lock; |
774 | ||
775 | /* Rate limiting time interval */ | |
776 | unsigned long numabalancing_migrate_next_window; | |
777 | ||
778 | /* Number of pages migrated during the rate limiting time interval */ | |
779 | unsigned long numabalancing_migrate_nr_pages; | |
780 | #endif | |
1da177e4 LT |
781 | } pg_data_t; |
782 | ||
783 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) | |
784 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) | |
d41dee36 | 785 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
408fde81 | 786 | #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) |
d41dee36 AW |
787 | #else |
788 | #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) | |
789 | #endif | |
408fde81 | 790 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
1da177e4 | 791 | |
c6830c22 | 792 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
da3649e1 | 793 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) |
c6830c22 | 794 | |
da3649e1 CS |
795 | static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) |
796 | { | |
797 | return pgdat->node_start_pfn + pgdat->node_spanned_pages; | |
798 | } | |
799 | ||
800 | static inline bool pgdat_is_empty(pg_data_t *pgdat) | |
801 | { | |
802 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; | |
803 | } | |
c6830c22 | 804 | |
208d54e5 DH |
805 | #include <linux/memory_hotplug.h> |
806 | ||
4eaf3f64 | 807 | extern struct mutex zonelists_mutex; |
9adb62a5 | 808 | void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); |
99504748 | 809 | void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); |
88f5acf8 MG |
810 | bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, |
811 | int classzone_idx, int alloc_flags); | |
812 | bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, | |
7fb1d9fc | 813 | int classzone_idx, int alloc_flags); |
a2f3aa02 DH |
814 | enum memmap_context { |
815 | MEMMAP_EARLY, | |
816 | MEMMAP_HOTPLUG, | |
817 | }; | |
718127cc | 818 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, |
a2f3aa02 DH |
819 | unsigned long size, |
820 | enum memmap_context context); | |
718127cc | 821 | |
bea8c150 | 822 | extern void lruvec_init(struct lruvec *lruvec); |
7f5e86c2 KK |
823 | |
824 | static inline struct zone *lruvec_zone(struct lruvec *lruvec) | |
825 | { | |
c255a458 | 826 | #ifdef CONFIG_MEMCG |
7f5e86c2 KK |
827 | return lruvec->zone; |
828 | #else | |
829 | return container_of(lruvec, struct zone, lruvec); | |
830 | #endif | |
831 | } | |
832 | ||
1da177e4 LT |
833 | #ifdef CONFIG_HAVE_MEMORY_PRESENT |
834 | void memory_present(int nid, unsigned long start, unsigned long end); | |
835 | #else | |
836 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} | |
837 | #endif | |
838 | ||
7aac7898 LS |
839 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
840 | int local_memory_node(int node_id); | |
841 | #else | |
842 | static inline int local_memory_node(int node_id) { return node_id; }; | |
843 | #endif | |
844 | ||
1da177e4 LT |
845 | #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE |
846 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |
847 | #endif | |
848 | ||
849 | /* | |
850 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. | |
851 | */ | |
852 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) | |
853 | ||
f3fe6512 CK |
854 | static inline int populated_zone(struct zone *zone) |
855 | { | |
856 | return (!!zone->present_pages); | |
857 | } | |
858 | ||
2a1e274a MG |
859 | extern int movable_zone; |
860 | ||
861 | static inline int zone_movable_is_highmem(void) | |
862 | { | |
fe03025d | 863 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) |
2a1e274a MG |
864 | return movable_zone == ZONE_HIGHMEM; |
865 | #else | |
866 | return 0; | |
867 | #endif | |
868 | } | |
869 | ||
2f1b6248 | 870 | static inline int is_highmem_idx(enum zone_type idx) |
1da177e4 | 871 | { |
e53ef38d | 872 | #ifdef CONFIG_HIGHMEM |
2a1e274a MG |
873 | return (idx == ZONE_HIGHMEM || |
874 | (idx == ZONE_MOVABLE && zone_movable_is_highmem())); | |
e53ef38d CL |
875 | #else |
876 | return 0; | |
877 | #endif | |
1da177e4 LT |
878 | } |
879 | ||
1da177e4 LT |
880 | /** |
881 | * is_highmem - helper function to quickly check if a struct zone is a | |
882 | * highmem zone or not. This is an attempt to keep references | |
883 | * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. | |
884 | * @zone - pointer to struct zone variable | |
885 | */ | |
886 | static inline int is_highmem(struct zone *zone) | |
887 | { | |
e53ef38d | 888 | #ifdef CONFIG_HIGHMEM |
ddc81ed2 HH |
889 | int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; |
890 | return zone_off == ZONE_HIGHMEM * sizeof(*zone) || | |
891 | (zone_off == ZONE_MOVABLE * sizeof(*zone) && | |
892 | zone_movable_is_highmem()); | |
e53ef38d CL |
893 | #else |
894 | return 0; | |
895 | #endif | |
1da177e4 LT |
896 | } |
897 | ||
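A user-space sketch of the offset arithmetic is_highmem() relies on: the byte offset of a zone within pgdat->node_zones equals zone_idx(zone) * sizeof(struct zone). The struct and zone list here are reduced mocks, not the real layout:

```c
#include <stdio.h>
#include <stddef.h>

/* Reduced mock of pgdat->node_zones; the real struct zone is much larger,
 * but the offset arithmetic is the same. */
struct zone_mock { char payload[192]; };

enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE, MAX_NR_ZONES };

int main(void)
{
	struct zone_mock node_zones[MAX_NR_ZONES];
	struct zone_mock *zone = &node_zones[ZONE_HIGHMEM];

	ptrdiff_t idx   = zone - node_zones;			/* zone_idx() */
	size_t zone_off = (char *)zone - (char *)node_zones;	/* byte offset */

	printf("zone_idx=%td, byte offset=%zu, ZONE_HIGHMEM*sizeof=%zu\n",
	       idx, zone_off, ZONE_HIGHMEM * sizeof(*zone));
	return 0;
}
```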
1da177e4 LT |
898 | /* These two functions are used to setup the per zone pages min values */ |
899 | struct ctl_table; | |
8d65af78 | 900 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, |
1da177e4 LT |
901 | void __user *, size_t *, loff_t *); |
902 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; | |
8d65af78 | 903 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, |
1da177e4 | 904 | void __user *, size_t *, loff_t *); |
8d65af78 | 905 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, |
8ad4b1fb | 906 | void __user *, size_t *, loff_t *); |
9614634f | 907 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, |
8d65af78 | 908 | void __user *, size_t *, loff_t *); |
0ff38490 | 909 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, |
8d65af78 | 910 | void __user *, size_t *, loff_t *); |
1da177e4 | 911 | |
f0c0b2b8 | 912 | extern int numa_zonelist_order_handler(struct ctl_table *, int, |
8d65af78 | 913 | void __user *, size_t *, loff_t *); |
f0c0b2b8 KH |
914 | extern char numa_zonelist_order[]; |
915 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ | |
916 | ||
93b7504e | 917 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
1da177e4 LT |
918 | |
919 | extern struct pglist_data contig_page_data; | |
920 | #define NODE_DATA(nid) (&contig_page_data) | |
921 | #define NODE_MEM_MAP(nid) mem_map | |
1da177e4 | 922 | |
93b7504e | 923 | #else /* CONFIG_NEED_MULTIPLE_NODES */ |
1da177e4 LT |
924 | |
925 | #include <asm/mmzone.h> | |
926 | ||
93b7504e | 927 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
348f8b6c | 928 | |
95144c78 KH |
929 | extern struct pglist_data *first_online_pgdat(void); |
930 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | |
931 | extern struct zone *next_zone(struct zone *zone); | |
8357f869 KH |
932 | |
933 | /** | |
12d15f0d | 934 | * for_each_online_pgdat - helper macro to iterate over all online nodes |
8357f869 KH |
935 | * @pgdat - pointer to a pg_data_t variable |
936 | */ | |
937 | #define for_each_online_pgdat(pgdat) \ | |
938 | for (pgdat = first_online_pgdat(); \ | |
939 | pgdat; \ | |
940 | pgdat = next_online_pgdat(pgdat)) | |
8357f869 KH |
941 | /** |
942 | * for_each_zone - helper macro to iterate over all memory zones | |
943 | * @zone - pointer to struct zone variable | |
944 | * | |
945 | * The user only needs to declare the zone variable, for_each_zone | |
946 | * fills it in. | |
947 | */ | |
948 | #define for_each_zone(zone) \ | |
949 | for (zone = (first_online_pgdat())->node_zones; \ | |
950 | zone; \ | |
951 | zone = next_zone(zone)) | |
952 | ||
ee99c71c KM |
953 | #define for_each_populated_zone(zone) \ |
954 | for (zone = (first_online_pgdat())->node_zones; \ | |
955 | zone; \ | |
956 | zone = next_zone(zone)) \ | |
957 | if (!populated_zone(zone)) \ | |
958 | ; /* do nothing */ \ | |
959 | else | |
960 | ||
dd1a239f MG |
961 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) |
962 | { | |
963 | return zoneref->zone; | |
964 | } | |
965 | ||
966 | static inline int zonelist_zone_idx(struct zoneref *zoneref) | |
967 | { | |
968 | return zoneref->zone_idx; | |
969 | } | |
970 | ||
971 | static inline int zonelist_node_idx(struct zoneref *zoneref) | |
972 | { | |
973 | #ifdef CONFIG_NUMA | |
974 | /* zone_to_nid not available in this context */ | |
975 | return zoneref->zone->node; | |
976 | #else | |
977 | return 0; | |
978 | #endif /* CONFIG_NUMA */ | |
979 | } | |
980 | ||
19770b32 MG |
981 | /** |
982 | * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point | |
983 | * @z - The cursor used as a starting point for the search | |
984 | * @highest_zoneidx - The zone index of the highest zone to return | |
985 | * @nodes - An optional nodemask to filter the zonelist with | |
986 | * @zone - The first suitable zone found is returned via this parameter | |
987 | * | |
988 | * This function returns the next zone at or below a given zone index that is | |
989 | * within the allowed nodemask using a cursor as the starting point for the | |
5bead2a0 MG |
990 | * search. The zoneref returned is a cursor that represents the current zone |
991 | * being examined. It should be advanced by one before calling | |
992 | * next_zones_zonelist again. | |
19770b32 MG |
993 | */ |
994 | struct zoneref *next_zones_zonelist(struct zoneref *z, | |
995 | enum zone_type highest_zoneidx, | |
996 | nodemask_t *nodes, | |
997 | struct zone **zone); | |
dd1a239f | 998 | |
19770b32 MG |
999 | /** |
1000 | * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist | |
1001 | * @zonelist - The zonelist to search for a suitable zone | |
1002 | * @highest_zoneidx - The zone index of the highest zone to return | |
1003 | * @nodes - An optional nodemask to filter the zonelist with | |
1004 | * @zone - The first suitable zone found is returned via this parameter | |
1005 | * | |
1006 | * This function returns the first zone at or below a given zone index that is | |
1007 | * within the allowed nodemask. The zoneref returned is a cursor that can be | |
5bead2a0 MG |
1008 | * used to iterate the zonelist with next_zones_zonelist by advancing it by |
1009 | * one before calling. | |
19770b32 | 1010 | */ |
dd1a239f | 1011 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, |
19770b32 MG |
1012 | enum zone_type highest_zoneidx, |
1013 | nodemask_t *nodes, | |
1014 | struct zone **zone) | |
54a6eb5c | 1015 | { |
19770b32 MG |
1016 | return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, |
1017 | zone); | |
54a6eb5c MG |
1018 | } |
1019 | ||
19770b32 MG |
1020 | /** |
1021 | * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask | |
1022 | * @zone - The current zone in the iterator | |
1023 | * @z - The current pointer within zonelist->zones being iterated | |
1024 | * @zlist - The zonelist being iterated | |
1025 | * @highidx - The zone index of the highest zone to return | |
1026 | * @nodemask - Nodemask allowed by the allocator | |
1027 | * | |
1028 | * This iterator iterates though all zones at or below a given zone index and | |
1029 | * within a given nodemask | |
1030 | */ | |
1031 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ | |
1032 | for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ | |
1033 | zone; \ | |
5bead2a0 | 1034 | z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \ |
54a6eb5c MG |
1035 | |
1036 | /** | |
1037 | * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index | |
1038 | * @zone - The current zone in the iterator | |
1039 | * @z - The current pointer within zonelist->zones being iterated | |
1040 | * @zlist - The zonelist being iterated | |
1041 | * @highidx - The zone index of the highest zone to return | |
1042 | * | |
1043 | * This iterator iterates though all zones at or below a given zone index. | |
1044 | */ | |
1045 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ | |
19770b32 | 1046 | for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) |
54a6eb5c | 1047 | |
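A hedged user-space mock of the zoneref cursor walk these iterators build on: entries above highest_zoneidx are skipped, the cursor is advanced by one between calls, and iteration stops at the NULL-terminated entry. Names and zone indices here are invented:

```c
#include <stdio.h>

/* Invented, user-space stand-ins: only the skip/advance/terminate pattern
 * mirrors next_zones_zonelist() and the iterator macros above. */
struct zoneref_mock { const char *zone; int zone_idx; };

static struct zoneref_mock *next_zones(struct zoneref_mock *z, int highest)
{
	while (z->zone && z->zone_idx > highest)
		z++;			/* skip zones above highest_zoneidx */
	return z;
}

int main(void)
{
	struct zoneref_mock zonerefs[] = {
		{ "Movable", 3 }, { "Normal", 1 }, { "DMA", 0 }, { NULL, 0 },
	};
	int highest_zoneidx = 1;	/* e.g. an allocation limited to lowmem */
	struct zoneref_mock *z;

	for (z = next_zones(zonerefs, highest_zoneidx);
	     z->zone;
	     z = next_zones(++z, highest_zoneidx))
		printf("usable zone: %s\n", z->zone);	/* Normal, then DMA */
	return 0;
}
```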
d41dee36 AW |
1048 | #ifdef CONFIG_SPARSEMEM |
1049 | #include <asm/sparsemem.h> | |
1050 | #endif | |
1051 | ||
c713216d | 1052 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ |
0ee332c1 | 1053 | !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) |
b4544568 AM |
1054 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) |
1055 | { | |
1056 | return 0; | |
1057 | } | |
b159d43f AW |
1058 | #endif |
1059 | ||
2bdaf115 AW |
1060 | #ifdef CONFIG_FLATMEM |
1061 | #define pfn_to_nid(pfn) (0) | |
1062 | #endif | |
1063 | ||
d41dee36 AW |
1064 | #ifdef CONFIG_SPARSEMEM |
1065 | ||
1066 | /* | |
1067 | * SECTION_SHIFT #bits space required to store a section # | |
1068 | * | |
1069 | * PA_SECTION_SHIFT physical address to/from section number | |
1070 | * PFN_SECTION_SHIFT pfn to/from section number | |
1071 | */ | |
d41dee36 AW |
1072 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) |
1073 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | |
1074 | ||
1075 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) | |
1076 | ||
1077 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | |
1078 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | |
1079 | ||
835c134e | 1080 | #define SECTION_BLOCKFLAGS_BITS \ |
d9c23400 | 1081 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) |
835c134e | 1082 | |
d41dee36 AW |
1083 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS |
1084 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | |
1085 | #endif | |
1086 | ||
e3c40f37 DK |
1087 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) |
1088 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | |
1089 | ||
a539f353 DK |
1090 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
1091 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | |
1092 | ||
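A user-space sketch of the section arithmetic, assuming x86_64-like parameters (SECTION_SIZE_BITS of 27 and a 4 KiB page, i.e. 128 MiB sections); both values are assumptions, not taken from this header:

```c
#include <stdio.h>

/* Assumed arch parameters (x86_64-like): 128 MiB sections, 4 KiB pages. */
#define SECTION_SIZE_BITS 27
#define PAGE_SHIFT        12

#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1))

#define pfn_to_section_nr(pfn)  ((pfn) >> PFN_SECTION_SHIFT)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)

int main(void)
{
	unsigned long pfn = 0x123456UL;		/* arbitrary example pfn */

	printf("pages per section: %lu\n", PAGES_PER_SECTION);	/* 32768 */
	printf("pfn 0x%lx -> section %lu, first pfn of that section 0x%lx\n",
	       pfn, pfn_to_section_nr(pfn), SECTION_ALIGN_DOWN(pfn));
	return 0;
}
```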
d41dee36 | 1093 | struct page; |
52d4b9ac | 1094 | struct page_cgroup; |
d41dee36 | 1095 | struct mem_section { |
29751f69 AW |
1096 | /* |
1097 | * This is, logically, a pointer to an array of struct | |
1098 | * pages. However, it is stored with some other magic. | |
1099 | * (see sparse.c::sparse_init_one_section()) | |
1100 | * | |
30c253e6 AW |
1101 | * Additionally during early boot we encode node id of |
1102 | * the location of the section here to guide allocation. | |
1103 | * (see sparse.c::memory_present()) | |
1104 | * | |
29751f69 AW |
1105 | * Making it a UL at least makes someone do a cast |
1106 | * before using it wrong. | |
1107 | */ | |
1108 | unsigned long section_mem_map; | |
5c0e3066 MG |
1109 | |
1110 | /* See declaration of similar field in struct zone */ | |
1111 | unsigned long *pageblock_flags; | |
c255a458 | 1112 | #ifdef CONFIG_MEMCG |
52d4b9ac KH |
1113 | /* |
1114 | * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use | |
1115 | * section. (see memcontrol.h/page_cgroup.h about this.) | |
1116 | */ | |
1117 | struct page_cgroup *page_cgroup; | |
1118 | unsigned long pad; | |
1119 | #endif | |
55878e88 CS |
1120 | /* |
1121 | * WARNING: mem_section must be a power-of-2 in size for the | |
1122 | * calculation and use of SECTION_ROOT_MASK to make sense. | |
1123 | */ | |
d41dee36 AW |
1124 | }; |
1125 | ||
3e347261 BP |
1126 | #ifdef CONFIG_SPARSEMEM_EXTREME |
1127 | #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) | |
1128 | #else | |
1129 | #define SECTIONS_PER_ROOT 1 | |
1130 | #endif | |
802f192e | 1131 | |
3e347261 | 1132 | #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) |
0faa5638 | 1133 | #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) |
3e347261 | 1134 | #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) |
802f192e | 1135 | |
3e347261 BP |
1136 | #ifdef CONFIG_SPARSEMEM_EXTREME |
1137 | extern struct mem_section *mem_section[NR_SECTION_ROOTS]; | |
802f192e | 1138 | #else |
3e347261 BP |
1139 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; |
1140 | #endif | |
d41dee36 | 1141 | |
29751f69 AW |
1142 | static inline struct mem_section *__nr_to_section(unsigned long nr) |
1143 | { | |
3e347261 BP |
1144 | if (!mem_section[SECTION_NR_TO_ROOT(nr)]) |
1145 | return NULL; | |
1146 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | |
29751f69 | 1147 | } |
4ca644d9 | 1148 | extern int __section_nr(struct mem_section* ms); |
04753278 | 1149 | extern unsigned long usemap_size(void); |
29751f69 AW |
1150 | |
1151 | /* | |
1152 | * We use the lower bits of the mem_map pointer to store | |
1153 | * a little bit of information. There should be at least | |
1154 | * 3 bits here due to 32-bit alignment. | |
1155 | */ | |
1156 | #define SECTION_MARKED_PRESENT (1UL<<0) | |
1157 | #define SECTION_HAS_MEM_MAP (1UL<<1) | |
1158 | #define SECTION_MAP_LAST_BIT (1UL<<2) | |
1159 | #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) | |
30c253e6 | 1160 | #define SECTION_NID_SHIFT 2 |
29751f69 AW |
1161 | |
1162 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | |
1163 | { | |
1164 | unsigned long map = section->section_mem_map; | |
1165 | map &= SECTION_MAP_MASK; | |
1166 | return (struct page *)map; | |
1167 | } | |
1168 | ||
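A user-space demo of the encoding described above: flags live in the low bits of section_mem_map, the node id is stored shifted clear of them during early boot, and masking with SECTION_MAP_MASK recovers the mem_map pointer. The address used is invented (and chosen to fit a 32-bit unsigned long):

```c
#include <stdio.h>

/* Bit layout mirrored from above for a stand-alone demo. */
#define SECTION_MARKED_PRESENT  (1UL << 0)
#define SECTION_HAS_MEM_MAP     (1UL << 1)
#define SECTION_MAP_LAST_BIT    (1UL << 2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT - 1))
#define SECTION_NID_SHIFT       2

int main(void)
{
	/* early boot: only the node id (here 3) plus the present flag */
	unsigned long early = (3UL << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;

	/* later: an (invented, 4-byte aligned) mem_map pointer plus flags */
	unsigned long fake_mem_map = 0xc1a00000UL;
	unsigned long mapped = fake_mem_map | SECTION_MARKED_PRESENT |
			       SECTION_HAS_MEM_MAP;

	printf("early nid=%lu present=%lu\n",
	       early >> SECTION_NID_SHIFT, early & SECTION_MARKED_PRESENT);
	printf("decoded mem_map=%#lx\n", mapped & SECTION_MAP_MASK);
	return 0;
}
```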
540557b9 | 1169 | static inline int present_section(struct mem_section *section) |
29751f69 | 1170 | { |
802f192e | 1171 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); |
29751f69 AW |
1172 | } |
1173 | ||
540557b9 AW |
1174 | static inline int present_section_nr(unsigned long nr) |
1175 | { | |
1176 | return present_section(__nr_to_section(nr)); | |
1177 | } | |
1178 | ||
1179 | static inline int valid_section(struct mem_section *section) | |
29751f69 | 1180 | { |
802f192e | 1181 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); |
29751f69 AW |
1182 | } |
1183 | ||
1184 | static inline int valid_section_nr(unsigned long nr) | |
1185 | { | |
1186 | return valid_section(__nr_to_section(nr)); | |
1187 | } | |
1188 | ||
d41dee36 AW |
1189 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) |
1190 | { | |
29751f69 | 1191 | return __nr_to_section(pfn_to_section_nr(pfn)); |
d41dee36 AW |
1192 | } |
1193 | ||
7b7bf499 | 1194 | #ifndef CONFIG_HAVE_ARCH_PFN_VALID |
d41dee36 AW |
1195 | static inline int pfn_valid(unsigned long pfn) |
1196 | { | |
1197 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
1198 | return 0; | |
29751f69 | 1199 | return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); |
d41dee36 | 1200 | } |
7b7bf499 | 1201 | #endif |
d41dee36 | 1202 | |
540557b9 AW |
1203 | static inline int pfn_present(unsigned long pfn) |
1204 | { | |
1205 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
1206 | return 0; | |
1207 | return present_section(__nr_to_section(pfn_to_section_nr(pfn))); | |
1208 | } | |
1209 | ||
d41dee36 AW |
1210 | /* |
1211 | * These are _only_ used during initialisation, therefore they | |
1212 | * can use __initdata ... They could have names to indicate | |
1213 | * this restriction. | |
1214 | */ | |
1215 | #ifdef CONFIG_NUMA | |
161599ff AW |
1216 | #define pfn_to_nid(pfn) \ |
1217 | ({ \ | |
1218 | unsigned long __pfn_to_nid_pfn = (pfn); \ | |
1219 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ | |
1220 | }) | |
2bdaf115 AW |
1221 | #else |
1222 | #define pfn_to_nid(pfn) (0) | |
d41dee36 AW |
1223 | #endif |
1224 | ||
d41dee36 AW |
1225 | #define early_pfn_valid(pfn) pfn_valid(pfn) |
1226 | void sparse_init(void); | |
1227 | #else | |
1228 | #define sparse_init() do {} while (0) | |
28ae55c9 | 1229 | #define sparse_index_init(_sec, _nid) do {} while (0) |
d41dee36 AW |
1230 | #endif /* CONFIG_SPARSEMEM */ |
1231 | ||
75167957 | 1232 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES |
cc2559bc | 1233 | bool early_pfn_in_nid(unsigned long pfn, int nid); |
75167957 AW |
1234 | #else |
1235 | #define early_pfn_in_nid(pfn, nid) (1) | |
1236 | #endif | |
1237 | ||
d41dee36 AW |
1238 | #ifndef early_pfn_valid |
1239 | #define early_pfn_valid(pfn) (1) | |
1240 | #endif | |
1241 | ||
1242 | void memory_present(int nid, unsigned long start, unsigned long end); | |
1243 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |
1244 | ||
14e07298 AW |
1245 | /* |
1246 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we | |
1247 | * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
1248 | * pfn_valid_within() should be used in this case; we optimise this away | |
1249 | * when we have no holes within a MAX_ORDER_NR_PAGES block. | |
1250 | */ | |
1251 | #ifdef CONFIG_HOLES_IN_ZONE | |
1252 | #define pfn_valid_within(pfn) pfn_valid(pfn) | |
1253 | #else | |
1254 | #define pfn_valid_within(pfn) (1) | |
1255 | #endif | |
1256 | ||
eb33575c MG |
1257 | #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL |
1258 | /* | |
1259 | * pfn_valid() is meant to be able to tell if a given PFN has valid memmap | |
1260 | * associated with it or not. In FLATMEM, it is expected that holes always | |
1261 | * have valid memmap as long as there is valid PFNs either side of the hole. | |
1262 | * In SPARSEMEM, it is assumed that a valid section has a memmap for the | |
1263 | * entire section. | |
1264 | * | |
1265 | * However, ARM, and maybe other embedded architectures in the future,
1266 | * free memmap backing holes to save memory on the assumption the memmap is | |
1267 | * never used. The page_zone linkages are then broken even though pfn_valid() | |
1268 | * returns true. A walker of the full memmap must then do this additional | |
1269 | * check to ensure the memmap they are looking at is sane by making sure | |
1270 | * the zone and PFN linkages are still valid. This is expensive, but walkers | |
1271 | * of the full memmap are extremely rare. | |
1272 | */ | |
1273 | int memmap_valid_within(unsigned long pfn, | |
1274 | struct page *page, struct zone *zone); | |
1275 | #else | |
1276 | static inline int memmap_valid_within(unsigned long pfn, | |
1277 | struct page *page, struct zone *zone) | |
1278 | { | |
1279 | return 1; | |
1280 | } | |
1281 | #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ | |
1282 | ||
97965478 | 1283 | #endif /* !__GENERATING_BOUNDS_H */ |
1da177e4 | 1284 | #endif /* !__ASSEMBLY__ */ |
1da177e4 | 1285 | #endif /* _LINUX_MMZONE_H */ |