/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
        if (type == &memblock.memory)
                return "memory";
        else if (type == &memblock.reserved)
                return "reserved";
        else
                return "unknown";
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                        phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++) {
                phys_addr_t rgnbase = type->regions[i].base;
                phys_addr_t rgnsize = type->regions[i].size;
                if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < type->cnt) ? i : -1;
}

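/*
 * Illustrative note: the predicate above treats regions as half-open
 * intervals [base, base + size).  For example, [0x1000, 0x2000) and
 * [0x1800, 0x2800) overlap because 0x1000 < 0x2800 and 0x1800 < 0x2000,
 * while the merely adjacent [0x1000, 0x2000) and [0x2000, 0x3000) do not.
 */
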
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
                                        phys_addr_t size, phys_addr_t align)
{
        phys_addr_t base, res_base;
        long j;

        /* Bail out if a huge size was requested */
        if (end < size)
                return 0;

        base = round_down(end - size, align);

        /* Prevent allocations returning 0 as it's also used to
         * indicate an allocation failure
         */
        if (start == 0)
                start = PAGE_SIZE;

        while (start <= base) {
                j = memblock_overlaps_region(&memblock.reserved, base, size);
                if (j < 0)
                        return base;
                res_base = memblock.reserved.regions[j].base;
                if (res_base < size)
                        break;
                base = round_down(res_base - size, align);
        }

        return 0;
}

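/*
 * Worked example (illustrative, not from the original source): with a
 * reserved region at [0x5000, 0x6000), memblock_find_region(0x1000,
 * 0x8000, 0x1000, 0x1000) first tries base = 0x7000, finds no collision
 * and returns it immediately.  If [0x6000, 0x8000) were also reserved,
 * the scan would hop below each colliding reservation
 * (0x7000, then 0x5000, then 0x4000) until a free window is found or
 * base drops below start.
 */
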
/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                        phys_addr_t size, phys_addr_t align)
{
        long i;

        BUG_ON(0 == size);

        /* Pump up max_addr */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* We do a top-down search; this tends to limit memory
         * fragmentation by keeping early boot allocations near the
         * top of memory.
         */
        for (i = memblock.memory.cnt - 1; i >= 0; i--) {
                phys_addr_t memblockbase = memblock.memory.regions[i].base;
                phys_addr_t memblocksize = memblock.memory.regions[i].size;
                phys_addr_t bottom, top, found;

                if (memblocksize < size)
                        continue;
                if ((memblockbase + memblocksize) <= start)
                        break;
                bottom = max(memblockbase, start);
                top = min(memblockbase + memblocksize, end);
                if (bottom >= top)
                        continue;
                found = memblock_find_region(bottom, top, size, align);
                if (found)
                        return found;
        }
        return 0;
}

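/*
 * Usage sketch (illustrative): callers that want to probe for space
 * without immediately reserving it can pair the two calls by hand,
 * e.g. to place a boot image below 1GB (img_size is a placeholder):
 *
 *      phys_addr_t where = memblock_find_in_range(0, 0x40000000, img_size, PAGE_SIZE);
 *      if (where)
 *              memblock_reserve(where, img_size);
 *
 * The find/reserve pair needs no locking this early in boot, as there
 * is only a single thread of execution.
 */
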
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_free(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        return memblock_reserve(__pa(memblock.reserved.regions),
                 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        unsigned long i;

        for (i = r; i < type->cnt - 1; i++) {
                type->regions[i].base = type->regions[i + 1].base;
                type->regions[i].size = type->regions[i + 1].size;
        }
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
        }
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() is true and we
         * use the slab allocator, or we allocate from MEMBLOCK itself. That
         * means this is unsafe to use while bootmem is active (unless bootmem
         * itself is implemented on top of MEMBLOCK, which isn't the case yet).
         *
         * This should not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations.
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else
                addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }
        new_array = __va(addr);

        memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
                 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

        /* Found space, we now need to move the array over before
         * we add the reserved region since it may be our reserved
         * array itself that is full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* If we use SLAB that's it, we are done */
        if (use_slab)
                return 0;

        /* Add the new reserved region now. Must not fail! */
        BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

        /* If the array wasn't our static init one, then free it. We only do
         * that before SLAB is available as later on, we don't know whether
         * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
         * anyway.
         */
        if (old_array != memblock_memory_init_regions &&
            old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_size);

        return 0;
}

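/*
 * Note (illustrative, with an assumed constant): with the common
 * INIT_MEMBLOCK_REGIONS value of 128, the first doubling grows a type
 * from 128 to 256 entries, i.e. one allocation of
 * 256 * sizeof(struct memblock_region) bytes, plus (in the !use_slab
 * case) one extra entry in the reserved array covering the new table.
 */
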
static long __init_memblock memblock_add_region(struct memblock_type *type,
                                                phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size;
        int i, slot = -1;

        /* First try and coalesce this MEMBLOCK with others */
        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rend = rgn->base + rgn->size;

                /* Exit if there are no possible hits */
                if (rgn->base > end || rgn->size == 0)
                        break;

                /* Check if we are fully enclosed within an existing
                 * block
                 */
                if (rgn->base <= base && rend >= end)
                        return 0;

                /* Check if we overlap or are adjacent with the bottom
                 * of a block.
                 */
                if (base < rgn->base && end >= rgn->base) {
                        /* We extend the bottom of the block down to our
                         * base
                         */
                        rgn->base = base;
                        rgn->size = rend - base;

                        /* Return if we have nothing else to allocate
                         * (fully coalesced)
                         */
                        if (rend >= end)
                                return 0;

                        /* We continue processing from the end of the
                         * coalesced block.
                         */
                        base = rend;
                        size = end - base;
                }

                /* Now check if we overlap or are adjacent with the
                 * top of a block
                 */
                if (base <= rend && end >= rend) {
                        /* We adjust our base down to enclose the
                         * original block and destroy it. It will be
                         * part of our new allocation. Since we've
                         * freed an entry, we know we won't fail
                         * to allocate one later, so we won't risk
                         * losing the original block allocation.
                         */
                        size += (base - rgn->base);
                        base = rgn->base;
                        memblock_remove_region(type, i--);
                }
        }

        /* If the array is empty, special case, replace the fake
         * filler region and return
         */
        if ((type->cnt == 1) && (type->regions[0].size == 0)) {
                type->regions[0].base = base;
                type->regions[0].size = size;
                return 0;
        }

        /* If we are out of space, we fail. It's too late to resize the array
         * but then this shouldn't have happened in the first place.
         */
        if (WARN_ON(type->cnt >= type->max))
                return -1;

        /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
        for (i = type->cnt - 1; i >= 0; i--) {
                if (base < type->regions[i].base) {
                        type->regions[i+1].base = type->regions[i].base;
                        type->regions[i+1].size = type->regions[i].size;
                } else {
                        type->regions[i+1].base = base;
                        type->regions[i+1].size = size;
                        slot = i + 1;
                        break;
                }
        }
        if (base < type->regions[0].base) {
                type->regions[0].base = base;
                type->regions[0].size = size;
                slot = 0;
        }
        type->cnt++;

        /* Is the array full? Try to resize it. If that fails, we undo
         * our allocation and return an error.
         */
        if (type->cnt == type->max && memblock_double_array(type)) {
                BUG_ON(slot < 0);
                memblock_remove_region(type, slot);
                return -1;
        }

        return 0;
}

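/*
 * Worked example (illustrative): starting from a single region
 * [0x2000, 0x3000), adding [0x1000, 0x4000) first extends the block's
 * bottom down to 0x1000, then detects the overlap with its top,
 * removes the old entry and re-adds the enclosing span, leaving one
 * coalesced region [0x1000, 0x4000).
 */
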
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type,
                                              phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size;
        int i;

        /* Walk through the array for collisions */
        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rend = rgn->base + rgn->size;

                /* Nothing more to do, exit */
                if (rgn->base > end || rgn->size == 0)
                        break;

                /* If we fully enclose the block, drop it */
                if (base <= rgn->base && end >= rend) {
                        memblock_remove_region(type, i--);
                        continue;
                }

                /* If we are fully enclosed within a block
                 * then we need to split it and we are done
                 */
                if (base > rgn->base && end < rend) {
                        rgn->size = base - rgn->base;
                        if (!memblock_add_region(type, end, rend - end))
                                return 0;
                        /* Failure to split is bad, we at least
                         * restore the block before erroring
                         */
                        rgn->size = rend - rgn->base;
                        WARN_ON(1);
                        return -1;
                }

                /* Check if we need to trim the bottom of a block */
                if (rgn->base < end && rend > end) {
                        rgn->size -= end - rgn->base;
                        rgn->base = end;
                        break;
                }

                /* And check if we need to trim the top of a block */
                if (base < rend)
                        rgn->size -= rend - base;
        }
        return 0;
}

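/*
 * Worked example (illustrative): removing [0x1800, 0x1c00) from a
 * region [0x1000, 0x2000) takes the "fully enclosed" branch above: the
 * region is trimmed to [0x1000, 0x1800) and a new block
 * [0x1c00, 0x2000) is re-added, splitting the original in two.
 */
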
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        struct memblock_type *_rgn = &memblock.reserved;

        BUG_ON(0 == size);

        return memblock_add_region(_rgn, base, size);
}

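/*
 * Usage sketch (illustrative): platform code typically reserves firmware
 * or initrd ranges right after registering RAM, and may release them
 * later once their contents have been consumed:
 *
 *      memblock_reserve(initrd_start, initrd_size);
 *      ...
 *      memblock_free(initrd_start, initrd_size);
 *
 * (initrd_start and initrd_size stand in for whatever range the caller owns.)
 */
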
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t found;

        /* We align the size to limit fragmentation. Without this, a lot of
         * small allocs quickly eat up the whole reserve array on sparc
         */
        size = round_up(size, align);

        found = memblock_find_in_range(0, max_addr, size, align);
        if (found && !memblock_add_region(&memblock.reserved, found, size))
                return found;

        return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

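/*
 * Usage sketch (illustrative): the difference between the entry points
 * is failure handling. __memblock_alloc_base() returns 0 on failure,
 * while memblock_alloc_base() and memblock_alloc() panic instead:
 *
 *      phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *      void *p = __va(pa);     // safe: memblock_alloc() panics on failure
 */
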
/*
 * Additional node-local top-down allocators.
 *
 * WARNING: Only available after early_node_map[] has been populated;
 * on some architectures that means after all the calls to
 * add_active_range() have been done.
 */

static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
                                                 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
        unsigned long start_pfn, end_pfn;
        int i;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
                if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
                        return max(start, PFN_PHYS(start_pfn));
#endif
        *nid = 0;
        return start;
}

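/*
 * Example (illustrative): if node 0 covers [0, 1GB) and node 1 covers
 * [1GB, 2GB), then memblock_nid_range_rev(0x20000000, 0x60000000, &nid)
 * matches the pfn range containing end = 1.5GB, sets nid = 1 and
 * returns 0x40000000, i.e. the start of the largest tail of
 * [start, end) that lies within a single node.
 */
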
phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
                                               phys_addr_t end,
                                               phys_addr_t size,
                                               phys_addr_t align, int nid)
{
        struct memblock_type *mem = &memblock.memory;
        int i;

        BUG_ON(0 == size);

        /* Pump up max_addr */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        for (i = mem->cnt - 1; i >= 0; i--) {
                struct memblock_region *r = &mem->regions[i];
                phys_addr_t base = max(start, r->base);
                phys_addr_t top = min(end, r->base + r->size);

                while (base < top) {
                        phys_addr_t tbase, ret;
                        int tnid;

                        tbase = memblock_nid_range_rev(base, top, &tnid);
                        if (nid == MAX_NUMNODES || tnid == nid) {
                                ret = memblock_find_region(tbase, top, size, align);
                                if (ret)
                                        return ret;
                        }
                        top = tbase;
                }
        }

        return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t found;

        /*
         * We align the size to limit fragmentation. Without this, a lot of
         * small allocs quickly eat up the whole reserve array on sparc
         */
        size = round_up(size, align);

        found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                            size, align, nid);
        if (found && !memblock_add_region(&memblock.reserved, found, size))
                return found;

        return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

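/*
 * Usage sketch (illustrative): per-node early allocations prefer local
 * memory but usually should not fail outright when one node is full,
 * hence the _try_ variant:
 *
 *      phys_addr_t pa = memblock_alloc_try_nid(sz, PAGE_SIZE, nid);
 *
 * which falls back to any accessible memory when node nid has no
 * suitable free range (sz and nid are placeholders).
 */
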
/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
        return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
        int idx = memblock.memory.cnt - 1;

        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
        unsigned long i;
        phys_addr_t limit;
        struct memblock_region *p;

        if (!memory_limit)
                return;

        /* Truncate the memblock regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < memblock.memory.cnt; i++) {
                if (limit > memblock.memory.regions[i].size) {
                        limit -= memblock.memory.regions[i].size;
                        continue;
                }

                memblock.memory.regions[i].size = limit;
                memblock.memory.cnt = i + 1;
                break;
        }

        memory_limit = memblock_end_of_DRAM();

        /* And truncate any reserves above the limit also. */
        for (i = 0; i < memblock.reserved.cnt; i++) {
                p = &memblock.reserved.regions[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        memblock_remove_region(&memblock.reserved, i);
                        i--;
                }
        }
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
        unsigned int left = 0, right = type->cnt;

        do {
                unsigned int mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else
                        return mid;
        } while (left < right);
        return -1;
}

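/*
 * Note: the binary search is valid because memblock_add_region() keeps
 * each region array sorted by base and coalesces overlaps.  For example
 * (illustrative), with regions {[0x0, 0x1000), [0x100000, 0x200000)},
 * looking up 0x150000 probes mid = 1 and returns index 1.
 */
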
int __init memblock_is_reserved(phys_addr_t addr)
{
        return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
        return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
        int idx = memblock_search(&memblock.memory, base);

        if (idx == -1)
                return 0;
        return memblock.memory.regions[idx].base <= base &&
                (memblock.memory.regions[idx].base +
                 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

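/*
 * Note: the two range predicates are deliberately asymmetric.
 * memblock_is_region_memory() requires the whole range to sit inside a
 * single memory block, while memblock_is_region_reserved() is true if
 * the range merely intersects any reservation, e.g. (illustrative):
 *
 *      if (memblock_is_region_reserved(base, size))
 *              return -EBUSY;  // typical "is this range free?" check
 */
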
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
        memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
        unsigned long long base, size;
        int i;

        pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

        for (i = 0; i < region->cnt; i++) {
                base = region->regions[i].base;
                size = region->regions[i].size;

                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
                        name, i, base, base + size - 1, size);
        }
}

void __init_memblock memblock_dump_all(void)
{
        if (!memblock_debug)
                return;

        pr_info("MEMBLOCK configuration:\n");
        pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

        memblock_dump(&memblock.memory, "memory");
        memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
        int i;

        /* Check marker in the unused last array entry */
        WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
                != (phys_addr_t)RED_INACTIVE);
        WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
                != (phys_addr_t)RED_INACTIVE);

        memblock.memory_size = 0;

        for (i = 0; i < memblock.memory.cnt; i++)
                memblock.memory_size += memblock.memory.regions[i].size;

        /* We allow resizing from here on */
        memblock_can_resize = 1;
}

void __init memblock_init(void)
{
        static int init_done __initdata = 0;

        if (init_done)
                return;
        init_done = 1;

        /* Hookup the initial arrays */
        memblock.memory.regions = memblock_memory_init_regions;
        memblock.memory.max = INIT_MEMBLOCK_REGIONS;
        memblock.reserved.regions = memblock_reserved_init_regions;
        memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

        /* Write a marker in the unused last array entry */
        memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
        memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

        /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
         * This simplifies the memblock_add_region() code.
         */
        memblock.memory.regions[0].base = 0;
        memblock.memory.regions[0].size = 0;
        memblock.memory.cnt = 1;

        /* Ditto. */
        memblock.reserved.regions[0].base = 0;
        memblock.reserved.regions[0].size = 0;
        memblock.reserved.cnt = 1;

        memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

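/*
 * Typical boot-time call sequence (illustrative; the exact hooks are
 * architecture-specific):
 *
 *      memblock_init();                        // hook up static arrays
 *      memblock_add(ram_base, ram_size);       // register RAM banks
 *      memblock_reserve(kernel_start, kernel_size);
 *      memblock_analyze();                     // compute memory_size,
 *                                              // enable array resizing
 *      pa = memblock_alloc(sz, PAGE_SIZE);     // now safe to allocate
 *
 * (ram_base, ram_size, kernel_start, kernel_size and sz are placeholders.)
 */
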
static int __init early_memblock(char *p)
{
        if (p && strstr(p, "debug"))
                memblock_debug = 1;
        return 0;
}
early_param("memblock", early_memblock);

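/*
 * Note (from the code above): booting with "memblock=debug" on the
 * kernel command line sets memblock_debug, which enables the
 * memblock_dump_all() output and the memblock_dbg() messages.
 */
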
#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
        struct memblock_type *type = m->private;
        struct memblock_region *reg;
        int i;

        for (i = 0; i < type->cnt; i++) {
                reg = &type->regions[i];
                seq_printf(m, "%4d: ", i);
                if (sizeof(phys_addr_t) == 4)
                        seq_printf(m, "0x%08lx..0x%08lx\n",
                                   (unsigned long)reg->base,
                                   (unsigned long)(reg->base + reg->size - 1));
                else
                        seq_printf(m, "0x%016llx..0x%016llx\n",
                                   (unsigned long long)reg->base,
                                   (unsigned long long)(reg->base + reg->size - 1));
        }
        return 0;
}

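/*
 * Sample output (illustrative, derived from the format strings above),
 * e.g. reading the debugfs file "memblock/memory" on a 64-bit machine:
 *
 *         0: 0x0000000000000000..0x000000000009efff
 *         1: 0x0000000000100000..0x000000007fffffff
 */
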
static int memblock_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
        .open = memblock_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init memblock_init_debugfs(void)
{
        struct dentry *root = debugfs_create_dir("memblock", NULL);
        if (!root)
                return -ENXIO;
        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

        return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */