/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <[email protected]>
 *  Copyright (C) 2002,2003 Andi Kleen <[email protected]>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

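/*
 * Dump a summary of memory usage: free areas, free swap, and counts of
 * reserved, swap-cached and shared pages across all online nodes.
 */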
void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

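/*
 * Allocate a zeroed page for a kernel page table: from the page allocator
 * once it is up, or from bootmem during early boot.
 */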
static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

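/*
 * Install a single kernel pte for a fixmap entry: walk pgd/pud/pmd for
 * vaddr, allocating intermediate page tables as needed, then set the pte
 * and flush that one TLB entry.
 */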
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

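/*
 * Hand out the next page reserved for early page tables. Before bootmem is
 * up the page is made accessible through one of the temporary 2MB mappings
 * above; afterwards it comes straight from the page allocator.
 */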
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

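/*
 * Fill one page of pmd entries, mapping the physical range starting at
 * 'address' with 2MB (PSE) pages until 'end' is reached. Entries that are
 * already populated are left alone.
 */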
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

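/*
 * Add 2MB mappings to a pmd page that is already installed in the pud,
 * under init_mm's page_table_lock.
 */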
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

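/*
 * Populate one page of pud entries for the direct mapping: skip holes the
 * e820 map does not cover, update pud entries that already point to a pmd
 * page, and otherwise allocate a fresh pmd page and fill it.
 */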
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		int map;
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

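/*
 * Work out roughly how much memory the direct-mapping page tables will
 * need for 'end' bytes of RAM and grab a suitable physical area for them
 * from the e820 map.
 */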
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped.  Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

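/*
 * Drop the boot-time low identity mapping once it is no longer needed:
 * clear the first kernel pgd entry on the boot CPU, and on APs simply
 * switch cr3 to init_level4_pgt, which never had it.
 */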
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

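/*
 * Non-NUMA zone setup: record the top pfn of each zone (DMA, DMA32,
 * NORMAL) and let the core mm build the node and zone structures.
 */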
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
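/* Hand a hot-added page over to the page allocator and account for it. */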
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size - 1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

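/*
 * Without ACPI NUMA information there is no way to tell which node
 * hot-added memory belongs to, so assume node 0.
 */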
#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

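/*
 * Final memory setup: release bootmem to the page allocator, register the
 * /proc/kcore regions and report the memory layout. After this point
 * after_bootmem is set, so later page-table allocations come from the
 * page allocator instead of bootmem.
 */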
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
				 VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

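/*
 * Return a range of init memory to the page allocator, poisoning it first
 * so that stale references to freed init code or data are caught.
 */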
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
		__initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
			(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

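/*
 * Reserve a physical range in the bootmem allocator (per-node when NUMA is
 * enabled) and keep track of how much of the DMA zone gets reserved this way.
 */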
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;
	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

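/*
 * Check whether a kernel virtual address is backed by a valid page: the
 * address must be canonical and every level of the page-table walk must be
 * present, honouring 2MB large-page mappings at the pmd level.
 */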
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{
		.ctl_name	= 99,
		.procname	= "exception-trace",
		.data		= &exception_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table debug_root_table2[] = {
	{
		.ctl_name = CTL_DEBUG,
		.procname = "debug",
		.mode = 0555,
		.child = debug_table2
	},
	{}
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}