#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
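
/*
 * Worked example (assuming the default MAX_ORDER of 11 and 4 KiB pages):
 * the largest buddy block has order MAX_ORDER - 1 = 10, so
 * MAX_ORDER_NR_PAGES is 1 << 10 = 1024 pages, i.e. 4 MiB of physically
 * contiguous memory.
 */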
29 | ||
30 | /* | |
31 | * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed | |
32 | * costly to service. That is between allocation orders which should | |
33 | * coelesce naturally under reasonable reclaim pressure and those which | |
34 | * will not. | |
35 | */ | |
36 | #define PAGE_ALLOC_COSTLY_ORDER 3 | |
37 | ||
38 | #define MIGRATE_UNMOVABLE 0 | |
39 | #define MIGRATE_RECLAIMABLE 1 | |
40 | #define MIGRATE_MOVABLE 2 | |
41 | #define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */ | |
42 | #define MIGRATE_RESERVE 3 | |
43 | #define MIGRATE_ISOLATE 4 /* can't allocate from here */ | |
44 | #define MIGRATE_TYPES 5 | |
45 | ||
46 | #define for_each_migratetype_order(order, type) \ | |
47 | for (order = 0; order < MAX_ORDER; order++) \ | |
48 | for (type = 0; type < MIGRATE_TYPES; type++) | |
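
/*
 * Example usage (illustrative sketch only; "zone" and "nr_blocks" are
 * hypothetical locals in, say, a debugging helper, and zone->lock must be
 * held while the free lists are walked):
 *
 *      unsigned int order;
 *      int type;
 *      unsigned long nr_blocks = 0;
 *
 *      for_each_migratetype_order(order, type) {
 *              struct page *page;
 *
 *              list_for_each_entry(page,
 *                          &zone->free_area[order].free_list[type], lru)
 *                      nr_blocks++;
 *      }
 *
 * Free pages are linked onto the per-order, per-migratetype free lists
 * through page->lru, which is why the list walk above uses that member.
 */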
49 | ||
50 | extern int page_group_by_mobility_disabled; | |
51 | ||
52 | static inline int get_pageblock_migratetype(struct page *page) | |
53 | { | |
54 | return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); | |
55 | } | |
56 | ||
57 | struct free_area { | |
58 | struct list_head free_list[MIGRATE_TYPES]; | |
59 | unsigned long nr_free; | |
60 | }; | |
61 | ||
62 | struct pglist_data; | |
63 | ||
64 | /* | |
65 | * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. | |
66 | * So add a wild amount of padding here to ensure that they fall into separate | |
67 | * cachelines. There are very few zone structures in the machine, so space | |
68 | * consumption is not a concern here. | |
69 | */ | |
70 | #if defined(CONFIG_SMP) | |
71 | struct zone_padding { | |
72 | char x[0]; | |
73 | } ____cacheline_internodealigned_in_smp; | |
74 | #define ZONE_PADDING(name) struct zone_padding name; | |
75 | #else | |
76 | #define ZONE_PADDING(name) | |
77 | #endif | |
78 | ||
79 | enum zone_stat_item { | |
80 | /* First 128 byte cacheline (assuming 64 bit words) */ | |
81 | NR_FREE_PAGES, | |
82 | NR_LRU_BASE, | |
83 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ | |
84 | NR_ACTIVE_ANON, /* " " " " " */ | |
85 | NR_INACTIVE_FILE, /* " " " " " */ | |
86 | NR_ACTIVE_FILE, /* " " " " " */ | |
87 | NR_UNEVICTABLE, /* " " " " " */ | |
88 | NR_MLOCK, /* mlock()ed pages found and moved off LRU */ | |
89 | NR_ANON_PAGES, /* Mapped anonymous pages */ | |
90 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. | |
91 | only modified from process context */ | |
92 | NR_FILE_PAGES, | |
93 | NR_FILE_DIRTY, | |
94 | NR_WRITEBACK, | |
95 | NR_SLAB_RECLAIMABLE, | |
96 | NR_SLAB_UNRECLAIMABLE, | |
97 | NR_PAGETABLE, /* used for pagetables */ | |
98 | NR_KERNEL_STACK, | |
99 | /* Second 128 byte cacheline */ | |
100 | NR_UNSTABLE_NFS, /* NFS unstable pages */ | |
101 | NR_BOUNCE, | |
102 | NR_VMSCAN_WRITE, | |
103 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ | |
104 | NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ | |
105 | NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ | |
        NR_SHMEM,               /* shmem pages (includes tmpfs/GEM pages) */
        NR_DIRTIED,             /* page dirtyings since bootup */
        NR_WRITTEN,             /* page writings since bootup */
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        NR_ANON_TRANSPARENT_HUGEPAGES,
        NR_VM_ZONE_STAT_ITEMS };
119 | ||
120 | /* | |
121 | * We do arithmetic on the LRU lists in various places in the code, | |
122 | * so it is important to keep the active lists LRU_ACTIVE higher in | |
123 | * the array than the corresponding inactive lists, and to keep | |
124 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. | |
125 | * | |
126 | * This has to be kept in sync with the statistics in zone_stat_item | |
127 | * above and the descriptions in vmstat_text in mm/vmstat.c | |
128 | */ | |
129 | #define LRU_BASE 0 | |
130 | #define LRU_ACTIVE 1 | |
131 | #define LRU_FILE 2 | |
132 | ||
133 | enum lru_list { | |
134 | LRU_INACTIVE_ANON = LRU_BASE, | |
135 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, | |
136 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, | |
137 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, | |
138 | LRU_UNEVICTABLE, | |
139 | NR_LRU_LISTS | |
140 | }; | |
141 | ||
142 | #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++) | |
143 | ||
144 | #define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++) | |
145 | ||
146 | static inline int is_file_lru(enum lru_list l) | |
147 | { | |
148 | return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); | |
149 | } | |
150 | ||
151 | static inline int is_active_lru(enum lru_list l) | |
152 | { | |
153 | return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE); | |
154 | } | |
155 | ||
156 | static inline int is_unevictable_lru(enum lru_list l) | |
157 | { | |
158 | return (l == LRU_UNEVICTABLE); | |
159 | } | |
160 | ||
161 | /* Mask used at gathering information at once (see memcontrol.c) */ | |
162 | #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) | |
163 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) | |
164 | #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON) | |
165 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) | |
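
/*
 * Example (illustrative sketch; "zone" and "nr" are hypothetical locals,
 * and zone_page_state() is declared in linux/vmstat.h): walking only the
 * evictable lists and splitting the counts by anon/file:
 *
 *      enum lru_list l;
 *      unsigned long nr[2] = { 0, 0 };
 *
 *      for_each_evictable_lru(l)
 *              nr[is_file_lru(l)] += zone_page_state(zone, NR_LRU_BASE + l);
 *
 * This relies on enum lru_list and the NR_LRU_BASE block of
 * zone_stat_item staying in the same order, as documented above.
 */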
166 | ||
167 | enum zone_watermarks { | |
168 | WMARK_MIN, | |
169 | WMARK_LOW, | |
170 | WMARK_HIGH, | |
171 | NR_WMARK | |
172 | }; | |
173 | ||
174 | #define min_wmark_pages(z) (z->watermark[WMARK_MIN]) | |
175 | #define low_wmark_pages(z) (z->watermark[WMARK_LOW]) | |
176 | #define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) | |
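
/*
 * Example (illustrative sketch, not how the allocator itself is written;
 * "order" and "classzone_idx" are hypothetical locals and zone_page_state()
 * comes from linux/vmstat.h): a simple "is this zone under pressure?" check
 * compares the free page count against the low watermark:
 *
 *      if (zone_page_state(zone, NR_FREE_PAGES) <= low_wmark_pages(zone))
 *              wakeup_kswapd(zone, order, classzone_idx);
 *
 * The real allocator uses zone_watermark_ok()/zone_watermark_ok_safe()
 * (declared later in this file), which also account for lowmem_reserve
 * and the allocation order.
 */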
177 | ||
178 | struct per_cpu_pages { | |
179 | int count; /* number of pages in the list */ | |
180 | int high; /* high watermark, emptying needed */ | |
181 | int batch; /* chunk size for buddy add/remove */ | |
182 | ||
183 | /* Lists of pages, one per migrate type stored on the pcp-lists */ | |
184 | struct list_head lists[MIGRATE_PCPTYPES]; | |
185 | }; | |
186 | ||
187 | struct per_cpu_pageset { | |
188 | struct per_cpu_pages pcp; | |
189 | #ifdef CONFIG_NUMA | |
190 | s8 expire; | |
191 | #endif | |
192 | #ifdef CONFIG_SMP | |
193 | s8 stat_threshold; | |
194 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; | |
195 | #endif | |
196 | }; | |
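
/*
 * Note: the per-cpu lists in struct per_cpu_pages are indexed by migrate
 * type but only cover the first MIGRATE_PCPTYPES types (UNMOVABLE,
 * RECLAIMABLE, MOVABLE).  Pages of the remaining types are either handed
 * straight back to the buddy free lists or filed under one of the pcp
 * types when freed; see free_hot_cold_page() in mm/page_alloc.c for the
 * authoritative behaviour.
 */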
197 | ||
198 | #endif /* !__GENERATING_BOUNDS.H */ | |
199 | ||
200 | enum zone_type { | |
201 | #ifdef CONFIG_ZONE_DMA | |
202 | /* | |
203 | * ZONE_DMA is used when there are devices that are not able | |
204 | * to do DMA to all of addressable memory (ZONE_NORMAL). Then we | |
205 | * carve out the portion of memory that is needed for these devices. | |
206 | * The range is arch specific. | |
207 | * | |
208 | * Some examples | |
209 | * | |
210 | * Architecture Limit | |
211 | * --------------------------- | |
212 | * parisc, ia64, sparc <4G | |
213 | * s390 <2G | |
214 | * arm Various | |
215 | * alpha Unlimited or 0-16MB. | |
216 | * | |
217 | * i386, x86_64 and multiple other arches | |
218 | * <16M. | |
219 | */ | |
220 | ZONE_DMA, | |
221 | #endif | |
222 | #ifdef CONFIG_ZONE_DMA32 | |
223 | /* | |
224 | * x86_64 needs two ZONE_DMAs because it supports devices that are | |
225 | * only able to do DMA to the lower 16M but also 32 bit devices that | |
226 | * can only do DMA areas below 4G. | |
227 | */ | |
228 | ZONE_DMA32, | |
229 | #endif | |
230 | /* | |
231 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be | |
232 | * performed on pages in ZONE_NORMAL if the DMA devices support | |
233 | * transfers to all addressable memory. | |
234 | */ | |
235 | ZONE_NORMAL, | |
236 | #ifdef CONFIG_HIGHMEM | |
237 | /* | |
238 | * A memory area that is only addressable by the kernel through | |
239 | * mapping portions into its own address space. This is for example | |
240 | * used by i386 to allow the kernel to address the memory beyond | |
241 | * 900MB. The kernel will set up special mappings (page | |
242 | * table entries on i386) for each page that the kernel needs to | |
243 | * access. | |
244 | */ | |
245 | ZONE_HIGHMEM, | |
246 | #endif | |
247 | ZONE_MOVABLE, | |
248 | __MAX_NR_ZONES | |
249 | }; | |
250 | ||
251 | #ifndef __GENERATING_BOUNDS_H | |
252 | ||
253 | /* | |
254 | * When a memory allocation must conform to specific limitations (such | |
255 | * as being suitable for DMA) the caller will pass in hints to the | |
256 | * allocator in the gfp_mask, in the zone modifier bits. These bits | |
257 | * are used to select a priority ordered list of memory zones which | |
258 | * match the requested limits. See gfp_zone() in include/linux/gfp.h | |
259 | */ | |
260 | ||
261 | #if MAX_NR_ZONES < 2 | |
262 | #define ZONES_SHIFT 0 | |
263 | #elif MAX_NR_ZONES <= 2 | |
264 | #define ZONES_SHIFT 1 | |
265 | #elif MAX_NR_ZONES <= 4 | |
266 | #define ZONES_SHIFT 2 | |
267 | #else | |
268 | #error ZONES_SHIFT -- too many zones configured adjust calculation | |
269 | #endif | |
270 | ||
271 | struct zone_reclaim_stat { | |
272 | /* | |
273 | * The pageout code in vmscan.c keeps track of how many of the | |
274 | * mem/swap backed and file backed pages are refeferenced. | |
275 | * The higher the rotated/scanned ratio, the more valuable | |
276 | * that cache is. | |
277 | * | |
278 | * The anon LRU stats live in [0], file LRU stats in [1] | |
279 | */ | |
280 | unsigned long recent_rotated[2]; | |
281 | unsigned long recent_scanned[2]; | |
282 | }; | |
283 | ||
284 | struct zone { | |
285 | /* Fields commonly accessed by the page allocator */ | |
286 | ||
287 | /* zone watermarks, access with *_wmark_pages(zone) macros */ | |
288 | unsigned long watermark[NR_WMARK]; | |
289 | ||
290 | /* | |
291 | * When free pages are below this point, additional steps are taken | |
292 | * when reading the number of free pages to avoid per-cpu counter | |
293 | * drift allowing watermarks to be breached | |
294 | */ | |
295 | unsigned long percpu_drift_mark; | |
296 | ||
297 | /* | |
298 | * We don't know if the memory that we're going to allocate will be freeable | |
299 | * or/and it will be released eventually, so to avoid totally wasting several | |
300 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk | |
301 | * to run OOM on the lower zones despite there's tons of freeable ram | |
302 | * on the higher zones). This array is recalculated at runtime if the | |
303 | * sysctl_lowmem_reserve_ratio sysctl changes. | |
304 | */ | |
305 | unsigned long lowmem_reserve[MAX_NR_ZONES]; | |
306 | ||
307 | #ifdef CONFIG_NUMA | |
308 | int node; | |
309 | /* | |
310 | * zone reclaim becomes active if more unmapped pages exist. | |
311 | */ | |
312 | unsigned long min_unmapped_pages; | |
313 | unsigned long min_slab_pages; | |
314 | #endif | |
        struct per_cpu_pageset __percpu *pageset;
        /*
         * free areas of different sizes
         */
        spinlock_t              lock;
        int                     all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
#endif
        struct free_area        free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
        unsigned long           *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
        /*
         * On compaction failure, 1<<compact_defer_shift compactions
         * are skipped before trying again. The number attempted since
         * last failure is tracked with compact_considered.
         */
        unsigned int            compact_considered;
        unsigned int            compact_defer_shift;
#endif

        ZONE_PADDING(_pad1_)

        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t              lru_lock;
        struct zone_lru {
                struct list_head list;
        } lru[NR_LRU_LISTS];

        struct zone_reclaim_stat reclaim_stat;

        unsigned long           pages_scanned;     /* since last reclaim */
        unsigned long           flags;             /* zone flags, see below */

        /* Zone statistics */
        atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];

        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;


        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */

        /*
         * wait_table           -- the array holding the hash table
         * wait_table_hash_nr_entries  -- the size of the hash table array
         * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c are
         * the primary users of these fields, and free_area_init_core() in
         * mm/page_alloc.c performs their initialization.
         */
        wait_queue_head_t       *wait_table;
        unsigned long           wait_table_hash_nr_entries;
        unsigned long           wait_table_bits;

        /*
         * Discontig memory support fields.
         */
        struct pglist_data      *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long           zone_start_pfn;

        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long           spanned_pages;  /* total size, including holes */
        unsigned long           present_pages;  /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        const char              *name;
} ____cacheline_internodealigned_in_smp;
424 | ||
425 | typedef enum { | |
426 | ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ | |
427 | ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ | |
428 | ZONE_CONGESTED, /* zone has many dirty pages backed by | |
429 | * a congested BDI | |
430 | */ | |
431 | } zone_flags_t; | |
432 | ||
433 | static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) | |
434 | { | |
435 | set_bit(flag, &zone->flags); | |
436 | } | |
437 | ||
438 | static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) | |
439 | { | |
440 | return test_and_set_bit(flag, &zone->flags); | |
441 | } | |
442 | ||
443 | static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) | |
444 | { | |
445 | clear_bit(flag, &zone->flags); | |
446 | } | |
447 | ||
448 | static inline int zone_is_reclaim_congested(const struct zone *zone) | |
449 | { | |
450 | return test_bit(ZONE_CONGESTED, &zone->flags); | |
451 | } | |
452 | ||
453 | static inline int zone_is_reclaim_locked(const struct zone *zone) | |
454 | { | |
455 | return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); | |
456 | } | |
457 | ||
458 | static inline int zone_is_oom_locked(const struct zone *zone) | |
459 | { | |
460 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | |
461 | } | |
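
/*
 * Example (illustrative sketch of how these helpers are meant to be used;
 * the real OOM serialisation lives in mm/oom_kill.c):
 *
 *      if (!zone_test_and_set_flag(zone, ZONE_OOM_LOCKED)) {
 *              ... this CPU now owns the OOM path for the zone ...
 *              zone_clear_flag(zone, ZONE_OOM_LOCKED);
 *      }
 *
 * The helpers use atomic bitops on zone->flags, so no additional locking
 * is needed around a single set/test/clear.
 */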
462 | ||
463 | /* | |
464 | * The "priority" of VM scanning is how much of the queues we will scan in one | |
465 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | |
466 | * queues ("queue_length >> 12") during an aging round. | |
467 | */ | |
468 | #define DEF_PRIORITY 12 | |
469 | ||
470 | /* Maximum number of zones on a zonelist */ | |
471 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) | |
472 | ||
473 | #ifdef CONFIG_NUMA | |
474 | ||
475 | /* | |
476 | * The NUMA zonelists are doubled because we need zonelists that restrict the | |
477 | * allocations to a single node for GFP_THISNODE. | |
478 | * | |
479 | * [0] : Zonelist with fallback | |
480 | * [1] : No fallback (GFP_THISNODE) | |
481 | */ | |
482 | #define MAX_ZONELISTS 2 | |
483 | ||
484 | ||
485 | /* | |
486 | * We cache key information from each zonelist for smaller cache | |
487 | * footprint when scanning for free pages in get_page_from_freelist(). | |
488 | * | |
489 | * 1) The BITMAP fullzones tracks which zones in a zonelist have come | |
490 | * up short of free memory since the last time (last_fullzone_zap) | |
491 | * we zero'd fullzones. | |
492 | * 2) The array z_to_n[] maps each zone in the zonelist to its node | |
493 | * id, so that we can efficiently evaluate whether that node is | |
494 | * set in the current tasks mems_allowed. | |
495 | * | |
496 | * Both fullzones and z_to_n[] are one-to-one with the zonelist, | |
497 | * indexed by a zones offset in the zonelist zones[] array. | |
498 | * | |
499 | * The get_page_from_freelist() routine does two scans. During the | |
500 | * first scan, we skip zones whose corresponding bit in 'fullzones' | |
501 | * is set or whose corresponding node in current->mems_allowed (which | |
502 | * comes from cpusets) is not set. During the second scan, we bypass | |
503 | * this zonelist_cache, to ensure we look methodically at each zone. | |
504 | * | |
505 | * Once per second, we zero out (zap) fullzones, forcing us to | |
506 | * reconsider nodes that might have regained more free memory. | |
507 | * The field last_full_zap is the time we last zapped fullzones. | |
508 | * | |
509 | * This mechanism reduces the amount of time we waste repeatedly | |
510 | * reexaming zones for free memory when they just came up low on | |
511 | * memory momentarilly ago. | |
512 | * | |
513 | * The zonelist_cache struct members logically belong in struct | |
514 | * zonelist. However, the mempolicy zonelists constructed for | |
515 | * MPOL_BIND are intentionally variable length (and usually much | |
516 | * shorter). A general purpose mechanism for handling structs with | |
517 | * multiple variable length members is more mechanism than we want | |
518 | * here. We resort to some special case hackery instead. | |
519 | * | |
520 | * The MPOL_BIND zonelists don't need this zonelist_cache (in good | |
521 | * part because they are shorter), so we put the fixed length stuff | |
522 | * at the front of the zonelist struct, ending in a variable length | |
523 | * zones[], as is needed by MPOL_BIND. | |
524 | * | |
525 | * Then we put the optional zonelist cache on the end of the zonelist | |
526 | * struct. This optional stuff is found by a 'zlcache_ptr' pointer in | |
527 | * the fixed length portion at the front of the struct. This pointer | |
528 | * both enables us to find the zonelist cache, and in the case of | |
529 | * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) | |
530 | * to know that the zonelist cache is not there. | |
531 | * | |
532 | * The end result is that struct zonelists come in two flavors: | |
533 | * 1) The full, fixed length version, shown below, and | |
534 | * 2) The custom zonelists for MPOL_BIND. | |
535 | * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. | |
536 | * | |
537 | * Even though there may be multiple CPU cores on a node modifying | |
538 | * fullzones or last_full_zap in the same zonelist_cache at the same | |
539 | * time, we don't lock it. This is just hint data - if it is wrong now | |
540 | * and then, the allocator will still function, perhaps a bit slower. | |
541 | */ | |
542 | ||
543 | ||
544 | struct zonelist_cache { | |
545 | unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ | |
546 | DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ | |
547 | unsigned long last_full_zap; /* when last zap'd (jiffies) */ | |
548 | }; | |
549 | #else | |
550 | #define MAX_ZONELISTS 1 | |
551 | struct zonelist_cache; | |
552 | #endif | |
553 | ||
554 | /* | |
555 | * This struct contains information about a zone in a zonelist. It is stored | |
556 | * here to avoid dereferences into large structures and lookups of tables | |
557 | */ | |
558 | struct zoneref { | |
559 | struct zone *zone; /* Pointer to actual zone */ | |
560 | int zone_idx; /* zone_idx(zoneref->zone) */ | |
561 | }; | |
562 | ||
563 | /* | |
564 | * One allocation request operates on a zonelist. A zonelist | |
565 | * is a list of zones, the first one is the 'goal' of the | |
566 | * allocation, the other zones are fallback zones, in decreasing | |
567 | * priority. | |
568 | * | |
569 | * If zlcache_ptr is not NULL, then it is just the address of zlcache, | |
570 | * as explained above. If zlcache_ptr is NULL, there is no zlcache. | |
571 | * * | |
572 | * To speed the reading of the zonelist, the zonerefs contain the zone index | |
573 | * of the entry being read. Helper functions to access information given | |
574 | * a struct zoneref are | |
575 | * | |
576 | * zonelist_zone() - Return the struct zone * for an entry in _zonerefs | |
577 | * zonelist_zone_idx() - Return the index of the zone for an entry | |
578 | * zonelist_node_idx() - Return the index of the node for an entry | |
579 | */ | |
580 | struct zonelist { | |
581 | struct zonelist_cache *zlcache_ptr; // NULL or &zlcache | |
582 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; | |
583 | #ifdef CONFIG_NUMA | |
584 | struct zonelist_cache zlcache; // optional ... | |
585 | #endif | |
586 | }; | |
587 | ||
588 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | |
589 | struct node_active_region { | |
590 | unsigned long start_pfn; | |
591 | unsigned long end_pfn; | |
592 | int nid; | |
593 | }; | |
594 | #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ | |
595 | ||
596 | #ifndef CONFIG_DISCONTIGMEM | |
597 | /* The array of struct pages - for discontigmem use pgdat->lmem_map */ | |
598 | extern struct page *mem_map; | |
599 | #endif | |
600 | ||
601 | /* | |
602 | * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM | |
603 | * (mostly NUMA machines?) to denote a higher-level memory zone than the | |
604 | * zone denotes. | |
605 | * | |
606 | * On NUMA machines, each NUMA node would have a pg_data_t to describe | |
607 | * it's memory layout. | |
608 | * | |
609 | * Memory statistics and page replacement data structures are maintained on a | |
610 | * per-zone basis. | |
611 | */ | |
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
        struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;
        int kswapd_max_order;
        enum zone_type classzone_idx;
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
        pg_data_t *__pgdat = NODE_DATA(nid);\
        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
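
/*
 * Example (illustrative sketch; "nid" is a hypothetical node id): walking
 * every pfn a node spans, holes included, e.g. from a debugging or hotplug
 * path:
 *
 *      unsigned long pfn;
 *
 *      for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++) {
 *              if (!pfn_valid(pfn))
 *                      continue;
 *              ... pfn_to_page(pfn) is safe to inspect here ...
 *      }
 *
 * Note that node_end_pfn() is exclusive: it is the first pfn past the
 * node's spanned range.
 */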
662 | ||
663 | #include <linux/memory_hotplug.h> | |
664 | ||
665 | extern struct mutex zonelists_mutex; | |
666 | void build_all_zonelists(void *data); | |
667 | void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); | |
668 | bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |
669 | int classzone_idx, int alloc_flags); | |
670 | bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, | |
671 | int classzone_idx, int alloc_flags); | |
672 | enum memmap_context { | |
673 | MEMMAP_EARLY, | |
674 | MEMMAP_HOTPLUG, | |
675 | }; | |
676 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, | |
677 | unsigned long size, | |
678 | enum memmap_context context); | |
679 | ||
680 | #ifdef CONFIG_HAVE_MEMORY_PRESENT | |
681 | void memory_present(int nid, unsigned long start, unsigned long end); | |
682 | #else | |
683 | static inline void memory_present(int nid, unsigned long start, unsigned long end) {} | |
684 | #endif | |
685 | ||
686 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES | |
687 | int local_memory_node(int node_id); | |
688 | #else | |
689 | static inline int local_memory_node(int node_id) { return node_id; }; | |
690 | #endif | |
691 | ||
692 | #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE | |
693 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |
694 | #endif | |
695 | ||
696 | /* | |
697 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. | |
698 | */ | |
699 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) | |
700 | ||
701 | static inline int populated_zone(struct zone *zone) | |
702 | { | |
703 | return (!!zone->present_pages); | |
704 | } | |
705 | ||
706 | extern int movable_zone; | |
707 | ||
708 | static inline int zone_movable_is_highmem(void) | |
709 | { | |
710 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP) | |
711 | return movable_zone == ZONE_HIGHMEM; | |
712 | #else | |
713 | return 0; | |
714 | #endif | |
715 | } | |
716 | ||
717 | static inline int is_highmem_idx(enum zone_type idx) | |
718 | { | |
719 | #ifdef CONFIG_HIGHMEM | |
720 | return (idx == ZONE_HIGHMEM || | |
721 | (idx == ZONE_MOVABLE && zone_movable_is_highmem())); | |
722 | #else | |
723 | return 0; | |
724 | #endif | |
725 | } | |
726 | ||
727 | static inline int is_normal_idx(enum zone_type idx) | |
728 | { | |
729 | return (idx == ZONE_NORMAL); | |
730 | } | |
731 | ||
732 | /** | |
733 | * is_highmem - helper function to quickly check if a struct zone is a | |
734 | * highmem zone or not. This is an attempt to keep references | |
735 | * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. | |
736 | * @zone - pointer to struct zone variable | |
737 | */ | |
738 | static inline int is_highmem(struct zone *zone) | |
739 | { | |
740 | #ifdef CONFIG_HIGHMEM | |
741 | int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; | |
742 | return zone_off == ZONE_HIGHMEM * sizeof(*zone) || | |
743 | (zone_off == ZONE_MOVABLE * sizeof(*zone) && | |
744 | zone_movable_is_highmem()); | |
745 | #else | |
746 | return 0; | |
747 | #endif | |
748 | } | |
749 | ||
750 | static inline int is_normal(struct zone *zone) | |
751 | { | |
752 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | |
753 | } | |
754 | ||
755 | static inline int is_dma32(struct zone *zone) | |
756 | { | |
757 | #ifdef CONFIG_ZONE_DMA32 | |
758 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; | |
759 | #else | |
760 | return 0; | |
761 | #endif | |
762 | } | |
763 | ||
764 | static inline int is_dma(struct zone *zone) | |
765 | { | |
766 | #ifdef CONFIG_ZONE_DMA | |
767 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | |
768 | #else | |
769 | return 0; | |
770 | #endif | |
771 | } | |
772 | ||
773 | /* These two functions are used to setup the per zone pages min values */ | |
774 | struct ctl_table; | |
775 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, | |
776 | void __user *, size_t *, loff_t *); | |
777 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; | |
778 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, | |
779 | void __user *, size_t *, loff_t *); | |
780 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, | |
781 | void __user *, size_t *, loff_t *); | |
782 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, | |
783 | void __user *, size_t *, loff_t *); | |
784 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, | |
785 | void __user *, size_t *, loff_t *); | |
786 | ||
787 | extern int numa_zonelist_order_handler(struct ctl_table *, int, | |
788 | void __user *, size_t *, loff_t *); | |
789 | extern char numa_zonelist_order[]; | |
790 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ | |
791 | ||
792 | #ifndef CONFIG_NEED_MULTIPLE_NODES | |
793 | ||
794 | extern struct pglist_data contig_page_data; | |
795 | #define NODE_DATA(nid) (&contig_page_data) | |
796 | #define NODE_MEM_MAP(nid) mem_map | |
797 | ||
798 | #else /* CONFIG_NEED_MULTIPLE_NODES */ | |
799 | ||
800 | #include <asm/mmzone.h> | |
801 | ||
802 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | |
803 | ||
804 | extern struct pglist_data *first_online_pgdat(void); | |
805 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | |
806 | extern struct zone *next_zone(struct zone *zone); | |
807 | ||
808 | /** | |
809 | * for_each_online_pgdat - helper macro to iterate over all online nodes | |
810 | * @pgdat - pointer to a pg_data_t variable | |
811 | */ | |
812 | #define for_each_online_pgdat(pgdat) \ | |
813 | for (pgdat = first_online_pgdat(); \ | |
814 | pgdat; \ | |
815 | pgdat = next_online_pgdat(pgdat)) | |
816 | /** | |
817 | * for_each_zone - helper macro to iterate over all memory zones | |
818 | * @zone - pointer to struct zone variable | |
819 | * | |
820 | * The user only needs to declare the zone variable, for_each_zone | |
821 | * fills it in. | |
822 | */ | |
823 | #define for_each_zone(zone) \ | |
824 | for (zone = (first_online_pgdat())->node_zones; \ | |
825 | zone; \ | |
826 | zone = next_zone(zone)) | |
827 | ||
828 | #define for_each_populated_zone(zone) \ | |
829 | for (zone = (first_online_pgdat())->node_zones; \ | |
830 | zone; \ | |
831 | zone = next_zone(zone)) \ | |
832 | if (!populated_zone(zone)) \ | |
833 | ; /* do nothing */ \ | |
834 | else | |
835 | ||
836 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) | |
837 | { | |
838 | return zoneref->zone; | |
839 | } | |
840 | ||
841 | static inline int zonelist_zone_idx(struct zoneref *zoneref) | |
842 | { | |
843 | return zoneref->zone_idx; | |
844 | } | |
845 | ||
846 | static inline int zonelist_node_idx(struct zoneref *zoneref) | |
847 | { | |
848 | #ifdef CONFIG_NUMA | |
849 | /* zone_to_nid not available in this context */ | |
850 | return zoneref->zone->node; | |
851 | #else | |
852 | return 0; | |
853 | #endif /* CONFIG_NUMA */ | |
854 | } | |
855 | ||
856 | /** | |
857 | * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point | |
858 | * @z - The cursor used as a starting point for the search | |
859 | * @highest_zoneidx - The zone index of the highest zone to return | |
860 | * @nodes - An optional nodemask to filter the zonelist with | |
861 | * @zone - The first suitable zone found is returned via this parameter | |
862 | * | |
863 | * This function returns the next zone at or below a given zone index that is | |
864 | * within the allowed nodemask using a cursor as the starting point for the | |
865 | * search. The zoneref returned is a cursor that represents the current zone | |
866 | * being examined. It should be advanced by one before calling | |
867 | * next_zones_zonelist again. | |
868 | */ | |
869 | struct zoneref *next_zones_zonelist(struct zoneref *z, | |
870 | enum zone_type highest_zoneidx, | |
871 | nodemask_t *nodes, | |
872 | struct zone **zone); | |
873 | ||
874 | /** | |
875 | * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist | |
876 | * @zonelist - The zonelist to search for a suitable zone | |
877 | * @highest_zoneidx - The zone index of the highest zone to return | |
878 | * @nodes - An optional nodemask to filter the zonelist with | |
879 | * @zone - The first suitable zone found is returned via this parameter | |
880 | * | |
881 | * This function returns the first zone at or below a given zone index that is | |
882 | * within the allowed nodemask. The zoneref returned is a cursor that can be | |
883 | * used to iterate the zonelist with next_zones_zonelist by advancing it by | |
884 | * one before calling. | |
885 | */ | |
886 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, | |
887 | enum zone_type highest_zoneidx, | |
888 | nodemask_t *nodes, | |
889 | struct zone **zone) | |
890 | { | |
891 | return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, | |
892 | zone); | |
893 | } | |
894 | ||
895 | /** | |
896 | * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask | |
897 | * @zone - The current zone in the iterator | |
898 | * @z - The current pointer within zonelist->zones being iterated | |
899 | * @zlist - The zonelist being iterated | |
900 | * @highidx - The zone index of the highest zone to return | |
901 | * @nodemask - Nodemask allowed by the allocator | |
902 | * | |
903 | * This iterator iterates though all zones at or below a given zone index and | |
904 | * within a given nodemask | |
905 | */ | |
906 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ | |
907 | for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ | |
908 | zone; \ | |
909 | z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \ | |
910 | ||
911 | /** | |
912 | * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index | |
913 | * @zone - The current zone in the iterator | |
914 | * @z - The current pointer within zonelist->zones being iterated | |
915 | * @zlist - The zonelist being iterated | |
916 | * @highidx - The zone index of the highest zone to return | |
917 | * | |
918 | * This iterator iterates though all zones at or below a given zone index. | |
919 | */ | |
920 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ | |
921 | for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) | |
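
/*
 * Example (illustrative sketch of the allocator's fallback walk; "zonelist",
 * "gfp_mask" and "order" are hypothetical locals, and gfp_zone() lives in
 * include/linux/gfp.h):
 *
 *      struct zoneref *z;
 *      struct zone *zone;
 *      enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 *
 *      for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *              if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *                                    zone_idx(zone), 0))
 *                      break;  // this zone has enough free memory
 *      }
 *
 * get_page_from_freelist() in mm/page_alloc.c is the real user of these
 * iterators.
 */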
922 | ||
923 | #ifdef CONFIG_SPARSEMEM | |
924 | #include <asm/sparsemem.h> | |
925 | #endif | |
926 | ||
927 | #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ | |
928 | !defined(CONFIG_ARCH_POPULATES_NODE_MAP) | |
929 | static inline unsigned long early_pfn_to_nid(unsigned long pfn) | |
930 | { | |
931 | return 0; | |
932 | } | |
933 | #endif | |
934 | ||
935 | #ifdef CONFIG_FLATMEM | |
936 | #define pfn_to_nid(pfn) (0) | |
937 | #endif | |
938 | ||
939 | #ifdef CONFIG_SPARSEMEM | |
940 | ||
941 | /* | |
942 | * SECTION_SHIFT #bits space required to store a section # | |
943 | * | |
944 | * PA_SECTION_SHIFT physical address to/from section number | |
945 | * PFN_SECTION_SHIFT pfn to/from section number | |
946 | */ | |
947 | #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) | |
948 | ||
949 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) | |
950 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) | |
951 | ||
952 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) | |
953 | ||
954 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | |
955 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | |
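
/*
 * Worked example (assuming SECTION_SIZE_BITS == 27, i.e. 128 MiB sections,
 * and PAGE_SHIFT == 12, i.e. 4 KiB pages -- both values are arch-dependent):
 * PFN_SECTION_SHIFT = 27 - 12 = 15, so PAGES_PER_SECTION = 1 << 15 = 32768
 * pages per section, and pfn_to_section_nr() (defined below) is simply
 * pfn >> 15.
 */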
956 | ||
957 | #define SECTION_BLOCKFLAGS_BITS \ | |
958 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) | |
959 | ||
960 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS | |
961 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | |
962 | #endif | |
963 | ||
964 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | |
965 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | |
966 | ||
967 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) | |
968 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | |
969 | ||
970 | struct page; | |
971 | struct page_cgroup; | |
972 | struct mem_section { | |
973 | /* | |
974 | * This is, logically, a pointer to an array of struct | |
975 | * pages. However, it is stored with some other magic. | |
976 | * (see sparse.c::sparse_init_one_section()) | |
977 | * | |
978 | * Additionally during early boot we encode node id of | |
979 | * the location of the section here to guide allocation. | |
980 | * (see sparse.c::memory_present()) | |
981 | * | |
982 | * Making it a UL at least makes someone do a cast | |
983 | * before using it wrong. | |
984 | */ | |
985 | unsigned long section_mem_map; | |
986 | ||
987 | /* See declaration of similar field in struct zone */ | |
988 | unsigned long *pageblock_flags; | |
989 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | |
990 | /* | |
991 | * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use | |
992 | * section. (see memcontrol.h/page_cgroup.h about this.) | |
993 | */ | |
994 | struct page_cgroup *page_cgroup; | |
995 | unsigned long pad; | |
996 | #endif | |
997 | }; | |
998 | ||
999 | #ifdef CONFIG_SPARSEMEM_EXTREME | |
1000 | #define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section)) | |
1001 | #else | |
1002 | #define SECTIONS_PER_ROOT 1 | |
1003 | #endif | |
1004 | ||
1005 | #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) | |
1006 | #define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT) | |
1007 | #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) | |
1008 | ||
1009 | #ifdef CONFIG_SPARSEMEM_EXTREME | |
1010 | extern struct mem_section *mem_section[NR_SECTION_ROOTS]; | |
1011 | #else | |
1012 | extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; | |
1013 | #endif | |
1014 | ||
1015 | static inline struct mem_section *__nr_to_section(unsigned long nr) | |
1016 | { | |
1017 | if (!mem_section[SECTION_NR_TO_ROOT(nr)]) | |
1018 | return NULL; | |
1019 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | |
1020 | } | |
1021 | extern int __section_nr(struct mem_section* ms); | |
1022 | extern unsigned long usemap_size(void); | |
1023 | ||
1024 | /* | |
1025 | * We use the lower bits of the mem_map pointer to store | |
1026 | * a little bit of information. There should be at least | |
1027 | * 3 bits here due to 32-bit alignment. | |
1028 | */ | |
1029 | #define SECTION_MARKED_PRESENT (1UL<<0) | |
1030 | #define SECTION_HAS_MEM_MAP (1UL<<1) | |
1031 | #define SECTION_MAP_LAST_BIT (1UL<<2) | |
1032 | #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) | |
1033 | #define SECTION_NID_SHIFT 2 | |
1034 | ||
1035 | static inline struct page *__section_mem_map_addr(struct mem_section *section) | |
1036 | { | |
1037 | unsigned long map = section->section_mem_map; | |
1038 | map &= SECTION_MAP_MASK; | |
1039 | return (struct page *)map; | |
1040 | } | |
1041 | ||
1042 | static inline int present_section(struct mem_section *section) | |
1043 | { | |
1044 | return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); | |
1045 | } | |
1046 | ||
1047 | static inline int present_section_nr(unsigned long nr) | |
1048 | { | |
1049 | return present_section(__nr_to_section(nr)); | |
1050 | } | |
1051 | ||
1052 | static inline int valid_section(struct mem_section *section) | |
1053 | { | |
1054 | return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); | |
1055 | } | |
1056 | ||
1057 | static inline int valid_section_nr(unsigned long nr) | |
1058 | { | |
1059 | return valid_section(__nr_to_section(nr)); | |
1060 | } | |
1061 | ||
1062 | static inline struct mem_section *__pfn_to_section(unsigned long pfn) | |
1063 | { | |
1064 | return __nr_to_section(pfn_to_section_nr(pfn)); | |
1065 | } | |
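
/*
 * Example (illustrative sketch): how pfn_to_page() effectively works under
 * SPARSEMEM without SPARSEMEM_VMEMMAP (the real definition lives in
 * include/asm-generic/memory_model.h):
 *
 *      struct mem_section *ms = __pfn_to_section(pfn);
 *      struct page *page = __section_mem_map_addr(ms) + pfn;
 *
 * Indexing by the full pfn is correct because sparse_init_one_section()
 * stores the mem_map pointer with the section's start pfn already
 * subtracted (see the encoding note in struct mem_section above).
 */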
1066 | ||
1067 | #ifndef CONFIG_HAVE_ARCH_PFN_VALID | |
1068 | static inline int pfn_valid(unsigned long pfn) | |
1069 | { | |
1070 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
1071 | return 0; | |
1072 | return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); | |
1073 | } | |
1074 | #endif | |
1075 | ||
1076 | static inline int pfn_present(unsigned long pfn) | |
1077 | { | |
1078 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | |
1079 | return 0; | |
1080 | return present_section(__nr_to_section(pfn_to_section_nr(pfn))); | |
1081 | } | |
1082 | ||
1083 | /* | |
1084 | * These are _only_ used during initialisation, therefore they | |
1085 | * can use __initdata ... They could have names to indicate | |
1086 | * this restriction. | |
1087 | */ | |
1088 | #ifdef CONFIG_NUMA | |
1089 | #define pfn_to_nid(pfn) \ | |
1090 | ({ \ | |
1091 | unsigned long __pfn_to_nid_pfn = (pfn); \ | |
1092 | page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \ | |
1093 | }) | |
1094 | #else | |
1095 | #define pfn_to_nid(pfn) (0) | |
1096 | #endif | |
1097 | ||
1098 | #define early_pfn_valid(pfn) pfn_valid(pfn) | |
1099 | void sparse_init(void); | |
1100 | #else | |
1101 | #define sparse_init() do {} while (0) | |
1102 | #define sparse_index_init(_sec, _nid) do {} while (0) | |
1103 | #endif /* CONFIG_SPARSEMEM */ | |
1104 | ||
1105 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | |
1106 | bool early_pfn_in_nid(unsigned long pfn, int nid); | |
1107 | #else | |
1108 | #define early_pfn_in_nid(pfn, nid) (1) | |
1109 | #endif | |
1110 | ||
1111 | #ifndef early_pfn_valid | |
1112 | #define early_pfn_valid(pfn) (1) | |
1113 | #endif | |
1114 | ||
1115 | void memory_present(int nid, unsigned long start, unsigned long end); | |
1116 | unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |
1117 | ||
1118 | /* | |
1119 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we | |
1120 | * need to check pfn validility within that MAX_ORDER_NR_PAGES block. | |
1121 | * pfn_valid_within() should be used in this case; we optimise this away | |
1122 | * when we have no holes within a MAX_ORDER_NR_PAGES block. | |
1123 | */ | |
1124 | #ifdef CONFIG_HOLES_IN_ZONE | |
1125 | #define pfn_valid_within(pfn) pfn_valid(pfn) | |
1126 | #else | |
1127 | #define pfn_valid_within(pfn) (1) | |
1128 | #endif | |
1129 | ||
1130 | #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL | |
1131 | /* | |
1132 | * pfn_valid() is meant to be able to tell if a given PFN has valid memmap | |
1133 | * associated with it or not. In FLATMEM, it is expected that holes always | |
1134 | * have valid memmap as long as there is valid PFNs either side of the hole. | |
1135 | * In SPARSEMEM, it is assumed that a valid section has a memmap for the | |
1136 | * entire section. | |
1137 | * | |
1138 | * However, an ARM, and maybe other embedded architectures in the future | |
1139 | * free memmap backing holes to save memory on the assumption the memmap is | |
1140 | * never used. The page_zone linkages are then broken even though pfn_valid() | |
1141 | * returns true. A walker of the full memmap must then do this additional | |
1142 | * check to ensure the memmap they are looking at is sane by making sure | |
1143 | * the zone and PFN linkages are still valid. This is expensive, but walkers | |
1144 | * of the full memmap are extremely rare. | |
1145 | */ | |
int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
                                        struct page *page, struct zone *zone)
{
        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
1155 | ||
1156 | #endif /* !__GENERATING_BOUNDS.H */ | |
1157 | #endif /* !__ASSEMBLY__ */ | |
1158 | #endif /* _LINUX_MMZONE_H */ |