/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

#include "internal.h"

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                printk("System RAM resource %llx - %llx cannot be added\n",
                       (unsigned long long)res->start,
                       (unsigned long long)res->end);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
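/*
 * Bootmem-allocated memmap and usemap pages are tagged here so they can
 * be recognized and freed at hot-remove time: the info type is stored in
 * the page's _mapcount, the owning section/node number in page_private(),
 * and each registration takes a reference on _count.
 */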
static void get_page_bootmem(unsigned long info, struct page *page, int type)
{
        atomic_set(&page->_mapcount, type);
        SetPagePrivate(page);
        set_page_private(page, info);
        atomic_inc(&page->_count);
}

void put_page_bootmem(struct page *page)
{
        int type;

        type = atomic_read(&page->_mapcount);
        BUG_ON(type >= -1);

        if (atomic_dec_return(&page->_count) == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                reset_page_mapcount(page);
                __free_pages_bootmem(page, 0);
        }
}

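/*
 * Tag every bootmem page backing this section's memmap and usemap so
 * they stay accounted for until the section is removed.
 */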
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        if (!pfn_valid(start_pfn))
                return;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

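/*
 * Register bootmem info for everything backing a node: the pgdat itself,
 * each zone's wait table (if present), and every valid section's memmap
 * and usemap.
 */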
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;
        struct zone *zone;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        zone = &pgdat->node_zones[0];
        for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
                if (zone->wait_table) {
                        nr_pages = zone->wait_table_hash_nr_entries
                                * sizeof(wait_queue_head_t);
                        nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
                        page = virt_to_page(zone->wait_table);

                        for (i = 0; i < nr_pages; i++, page++)
                                get_page_bootmem(node, page, NODE_INFO);
                }
        }

        pfn = pgdat->node_start_pfn;
        end_pfn = pfn + pgdat->node_spanned_pages;

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
                register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

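/*
 * Stretch the zone's pfn span to cover a newly added range, holding the
 * zone span seqlock so readers see a consistent start/length pair.
 */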
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
                           unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                            unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

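/*
 * Attach one section's worth of pages to a zone: initialize the zone if
 * it was empty, grow the zone and node spans, and initialize the new
 * range's struct pages.
 */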
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
        unsigned long flags;

        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret;

                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
                        phys_start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

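/*
 * Hot-add a single sparsemem section: allocate its memmap, hook it into
 * the zone, and register the new memory block with sysfs.
 */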
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        return register_new_memory(__pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        /*
         * XXX: Freeing memmap with vmemmap is not implemented yet.
         * This should be removed later.
         */
        return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        unsigned long flags;
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        pgdat_resize_lock(pgdat, &flags);
        sparse_remove_one_section(zone, ms);
        pgdat_resize_unlock(pgdat, &flags);
        return 0;
}
#endif

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        /* while initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(zone, i << PFN_SECTION_SHIFT);

                /*
                 * EEXIST is finally dealt with by the ioresource collision
                 * check.  See add_memory() => register_memory_resource().
                 * A warning will be printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                   unsigned long nr_pages)
{
        unsigned long i, ret = 0;
        int sections_to_remove;

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;
                ret = __remove_section(zone, __pfn_to_section(pfn));
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

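/*
 * Release one previously offline page to the buddy allocator and update
 * the global page accounting.
 */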
void online_page(struct page *page)
{
        totalram_pages++;
        num_physpages++;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
                totalhigh_pages++;
#endif

#ifdef CONFIG_FLATMEM
        max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif

        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                              void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;
        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        online_page(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}

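/*
 * Online a range of pages: notify MEM_GOING_ONLINE listeners, free the
 * pages into the buddy allocator, fix up zone/node accounting, start
 * kswapd for the node if needed, and rebuild the zonelists when a
 * previously empty zone becomes populated.
 */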
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        arg.status_change_nid = -1;

        nid = page_to_nid(pfn_to_page(pfn));
        if (node_present_pages(nid) == 0)
                arg.status_change_nid = nid;

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret) {
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                return ret;
        }
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_mutex.
         */
        zone = page_zone(pfn_to_page(pfn));
        /*
         * If this zone is not populated, then it is not in the zonelist.
         * This means the page allocator ignores this zone.
         * So, the zonelist must be updated after onlining.
         */
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;

        ret = walk_memory_resource(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
                        nr_pages, pfn);
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                return ret;
        }

        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;

        setup_per_zone_pages_min();
        if (onlined_pages) {
                kswapd_run(zone_to_nid(zone));
                node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
        }

        if (need_zonelists_rebuild)
                build_all_zonelists();
        else
                vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);

        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

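/*
 * Allocate and initialize the pgdat for a node coming online for the
 * first time; all of its zones start out empty.
 */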
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages. */
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
        return;
}

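/*
 * Main entry point for memory hot-add: claim the physical range in the
 * iomem resource tree, create the node's pgdat if the node was offline,
 * and let the architecture map the new memory.
 */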
int add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (!res)
                return -EEXIST;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                if (!pgdat)
                        return -ENOMEM;
                new_pgdat = 1;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);

        if (ret < 0)
                goto error;

        /* we online the node here. we can't roll back from here. */
        node_set_online(nid);

        cpuset_track_online_nodes();

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file of the new node can't be created, cpus
                 * on the node can't be hot-added.  There is no rollback
                 * path at this point, so catch it with BUG_ON(),
                 * reluctantly..
                 */
                BUG_ON(ret);
        }

        return ret;
error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order().  Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        int pageblocks_stride;

        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* Move forward by at least 1 * pageblock_nr_pages */
        pageblocks_stride = 1;

        /* If the entire pageblock is free, move to the end of the free page */
        if (pageblock_free(page))
                pageblocks_stride += page_order(page) - pageblock_order;

        return page + (pageblocks_stride * pageblock_nr_pages);
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        int type;
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                type = get_pageblock_migratetype(page);

                /*
                 * A pageblock containing MOVABLE or free pages is considered
                 * removable
                 */
                if (type != MIGRATE_MOVABLE && !pageblock_free(page))
                        return 0;

                /*
                 * A pageblock starting with a PageReserved page is not
                 * considered removable.
                 */
                if (PageReserved(page))
                        return 0;
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += MAX_ORDER_NR_PAGES) {
                i = 0;
                /* This is just a CONFIG_HOLES_IN_ZONE check. */
                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
                        i++;
                if (i == MAX_ORDER_NR_PAGES)
                        continue;
                page = pfn_to_page(pfn + i);
                if (zone && page_zone(page) != zone)
                        return 0;
                zone = page_zone(page);
        }
        return 1;
}

/*
 * Scanning pfns is much easier than scanning the lru list.
 * Scan pfns from start to end and return the first pfn whose page
 * is on the LRU.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                }
        }
        return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page,
                        unsigned long private,
                        int **x)
{
        /* This should be improved!! */
        return alloc_page(GFP_HIGHUSER_PAGECACHE);
}

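/*
 * Migrate pages out of [start_pfn, end_pfn), at most
 * NR_OFFLINE_AT_ONCE_PAGES at a time: isolate each in-use LRU page and
 * hand the batch to migrate_pages().
 */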
#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                if (!page_count(page))
                        continue;
                /*
                 * We can skip free pages. And we can only deal with pages on
                 * LRU.
                 */
                ret = isolate_lru_page(page, &source);
                if (!ret) { /* Success */
                        move_pages--;
                } else {
                        /* Because we don't have a big zone->lock, we should
                           check this again here. */
                        if (page_count(page))
                                not_managed++;
#ifdef CONFIG_DEBUG_VM
                        printk(KERN_INFO "removing from LRU failed"
                               " %lx/%d/%lx\n",
                               pfn, page_count(page), page->flags);
#endif
                }
        }
        ret = -EBUSY;
        if (not_managed) {
                if (!list_empty(&source))
                        putback_lru_pages(&source);
                goto out;
        }
        ret = 0;
        if (list_empty(&source))
                goto out;
        /* this function returns # of failed pages */
        ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
        return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
                                offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
                        check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

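/*
 * Offline a pageblock-aligned pfn range: isolate it, repeatedly migrate
 * LRU pages away and drain per-cpu lists until everything is free, then
 * pull the pages out of the buddy allocator and fix up the accounting.
 */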
int offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        struct zone *zone;
        struct memory_notify arg;

        BUG_ON(start_pfn >= end_pfn);
        /* at the very least, pageblock alignment is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /* This makes hotplug much easier... and readable.
           We assume this for now. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn);
        if (ret)
                return ret;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        arg.status_change_nid = -1;
        if (nr_pages >= node_present_pages(node))
                arg.status_change_nid = node;

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                flush_scheduled_work();
                cond_resched();
                drain_all_pages();
        }

        pfn = scan_lru_pages(start_pfn, end_pfn);
        if (pfn) { /* We have a page on LRU */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all zones' lru pagevecs, this is asynchronous... */
        lru_add_drain_all();
        flush_scheduled_work();
        yield();
        /* drain pcp pages, this is synchronous. */
        drain_all_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /* Ok, all of our target is isolated.
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and make the migrate type MOVABLE */
        undo_isolate_page_range(start_pfn, end_pfn);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;
        num_physpages -= offlined_pages;

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining %lx to %lx failed\n",
                start_pfn, end_pfn);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn);

        return ret;
}
#else
int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */