/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
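
/*
 * For example (hypothetical values): with base = ULLONG_MAX - 0x1000 and
 * *size = 0x2000, memblock_cap_size() trims *size down to 0x1000 so that
 * base + *size no longer wraps past the end of the physical address space.
 */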

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}
	return 0;
}
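
/*
 * Usage sketch (illustrative only, hypothetical values): a caller that needs
 * a 1 MiB area below 4 GiB, aligned to 1 MiB, on any node, could do
 * something like:
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(0, 0x100000000ULL, 0x100000,
 *					   0x100000, MAX_NUMNODES);
 *	if (!addr)
 *		return -ENOMEM;
 *
 * This only locates a candidate area; it still has to be claimed with
 * memblock_reserve() before it counts as allocated.
 */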

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
		new_array = addr ? __va(addr) : 0;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the
	 * static one
	 */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	/* Reserve the new array if that comes from the memblock.
	 * Otherwise, we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int nid)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base, nid);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
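
/*
 * Worked example (hypothetical values): if @type already contains
 * [0x1000-0x2000) and [0x3000-0x4000) and a new area [0x1800-0x3800) is
 * added, only the uncovered gap [0x2000-0x3000) is counted and inserted
 * (the parts already covered by existing regions are skipped), and the
 * final merge pass then collapses the three adjacent regions into a single
 * region [0x1000-0x4000), assuming they all carry the same node id.
 */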

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
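
/*
 * Worked example (hypothetical values): isolating [0x2000-0x3000) from a
 * type that holds a single region [0x1000-0x4000) splits it into
 * [0x1000-0x2000), [0x2000-0x3000) and [0x3000-0x4000); *start_rgn then
 * refers to the [0x2000-0x3000) entry and *end_rgn to the index one past
 * it, so callers can operate on exactly the isolated range.
 */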

static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
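
/*
 * Worked example (hypothetical values): with a single memory region [0-160)
 * and the reserved regions from the comment above
 * (0:[0-16), 1:[32-48), 2:[128-130)), successive calls report the free
 * intersections [16-32), [48-128) and [130-160), after which *idx is set to
 * ULLONG_MAX to signal the end of the iteration.
 */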

/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		mi = mem->cnt - 1;
		ri = rsv->cnt;
	}

	for ( ; mi >= 0; mi--) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri >= 0; ri--) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);

				if (m_start >= r_start)
					mi--;
				else
					ri--;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		type->regions[i].nid = nid;

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	phys_addr_t found;

	/* align @size to avoid excessive fragmentation on reserved array */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}


/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}
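
/*
 * Worked example (hypothetical values): with memory regions [0-256M) and
 * [1G-1.5G) and limit = 512M, the loop walks off the first 256M, places
 * max_addr at 1G + 256M inside the second region, and the two
 * __memblock_remove() calls then drop everything above that address.  The
 * limit therefore caps the amount of usable memory, not the highest
 * physical address.
 */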

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */