/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

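/* Dump the memory and reserved region tables (DEBUG builds only). */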
void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	pr_debug("lmb_dump_all:\n");
	pr_debug("    memory.cnt		  = 0x%lx\n", lmb.memory.cnt);
	pr_debug("    memory.size		  = 0x%llx\n",
		(unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		pr_debug("    memory.region[0x%lx].base       = 0x%llx\n",
			i, (unsigned long long)lmb.memory.region[i].base);
		pr_debug("		      .size     = 0x%llx\n",
			(unsigned long long)lmb.memory.region[i].size);
	}

	pr_debug("    reserved.cnt	  = 0x%lx\n", lmb.reserved.cnt);
	pr_debug("    reserved.size	  = 0x%lx\n", lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		pr_debug("    reserved.region[0x%lx].base       = 0x%llx\n",
			i, (unsigned long long)lmb.reserved.region[i].base);
		pr_debug("		      .size     = 0x%llx\n",
			(unsigned long long)lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

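/* Return non-zero if [base1, base1+size1) and [base2, base2+size2) overlap. */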
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
					u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

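/*
 * Return 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, and 0 if the regions are not adjacent.
 */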
static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

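/* Remove entry r from the region table, shifting later entries down. */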
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

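/* Recalculate lmb.memory.size as the sum of all memory region sizes. */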
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

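/*
 * Add [base, base+size) to a region table.  The new range is merged with
 * an existing entry when the two are adjacent; otherwise it is inserted
 * in sorted (ascending base) order.  Returns -1 if the table is full,
 * a non-negative value on success.
 */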
static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}

long lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

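/*
 * Remove [base, base+size) from the memory table.  The range must lie
 * entirely within a single existing region; that region is trimmed or,
 * if the range falls in its middle, split in two.
 */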
long lmb_remove(u64 base, u64 size)
{
	struct lmb_region *rgn = &(lmb.memory);
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

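/* Mark [base, base+size) as reserved so the allocators will not hand it out. */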
long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

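/*
 * Return the index of the first region in 'rgn' that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */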
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

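/* Round addr down/up to the given power-of-two alignment. */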
static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

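/*
 * Try to carve 'size' bytes, aligned to 'align', out of the top of the
 * address range [start, end), stepping down past any reserved regions.
 * On success the area is added to the reserved table and its base is
 * returned; ~(u64)0 is returned on failure.
 */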
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
					   u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = lmb_align_down((end - size), align);
	while (start <= base) {
		j = lmb_overlaps_region(&lmb.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (lmb_add_region(&lmb.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = lmb.reserved.region[j].base;
		if (res_base < size)
			break;
		base = lmb_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
				       u64 (*nid_range)(u64, u64, int *),
				       u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = lmb_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
							   size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

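/*
 * Allocate 'size' bytes with the given alignment, preferring memory on
 * NUMA node 'nid'.  As used here, nid_range() is expected to return the
 * end of the node-contiguous span starting at 'start' and report that
 * span's node id.  Falls back to an ordinary lmb_alloc() if no
 * node-local memory satisfies the request.
 */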
u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct lmb_region *mem = &lmb.memory;
	int i;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = lmb_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	return lmb_alloc(size, align);
}

u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

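/*
 * Core allocator: scan the memory regions from highest to lowest and
 * carve 'size' bytes (aligned to 'align') below 'max_addr' out of the
 * first region with a large enough unreserved hole.  Returns the base
 * of the allocation, or 0 on failure.
 */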
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = lmb_align_up(size, align);

	/* On some platforms, make sure we allocate lowmem */
	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb.reserved, base, size);
			if (j < 0) {
				/* this area isn't reserved, take it */
				if (lmb_add_region(&lmb.reserved, base, size) < 0)
					return 0;
				return base;
			}
			res_base = lmb.reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

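/* Return the first address past the highest (last) memory region. */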
u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}

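/* Return 1 if 'addr' falls within any reserved region, 0 otherwise. */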
int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

/*
 * Given a <base, len>, find which memory regions belong to this range.
 * Adjust the request and return a contiguous chunk.
 */
int lmb_find(struct lmb_property *res)
{
	int i;
	u64 rstart, rend;

	rstart = res->base;
	rend = rstart + res->size - 1;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 start = lmb.memory.region[i].base;
		u64 end = start + lmb.memory.region[i].size - 1;

		if (start > rend)
			return -1;

		if ((end >= rstart) && (start < rend)) {
			/* adjust the request */
			if (rstart < start)
				rstart = start;
			if (rend > end)
				rend = end;
			res->base = rstart;
			res->size = rend - rstart + 1;
			return 0;
		}
	}
	return -1;
}