]>
Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
4ed6552f KG |
2 | /* |
3 | * Procedures for maintaining information about logical memory blocks. | |
4 | * | |
5 | * Peter Bergner, IBM Corp. June 2001. | |
6 | * Copyright (C) 2001 Peter Bergner. | |
4ed6552f KG |
7 | */ |
8 | ||
9 | #include <common.h> | |
4d72caa5 | 10 | #include <image.h> |
4ed6552f | 11 | #include <lmb.h> |
f7ae49fc | 12 | #include <log.h> |
336d4615 | 13 | #include <malloc.h> |
4ed6552f | 14 | |
1274698d MV |
15 | #include <asm/global_data.h> |
16 | ||
17 | DECLARE_GLOBAL_DATA_PTR; | |
18 | ||
4ed6552f KG |
19 | #define LMB_ALLOC_ANYWHERE 0 |
20 | ||
358c7789 | 21 | static void lmb_dump_region(struct lmb_region *rgn, char *name) |
4ed6552f | 22 | { |
358c7789 PD |
23 | unsigned long long base, size, end; |
24 | enum lmb_flags flags; | |
25 | int i; | |
4ed6552f | 26 | |
358c7789 | 27 | printf(" %s.cnt = 0x%lx\n", name, rgn->cnt); |
4ed6552f | 28 | |
358c7789 PD |
29 | for (i = 0; i < rgn->cnt; i++) { |
30 | base = rgn->region[i].base; | |
31 | size = rgn->region[i].size; | |
32 | end = base + size - 1; | |
33 | flags = rgn->region[i].flags; | |
34 | ||
35 | printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n", | |
36 | name, i, base, end, size, flags); | |
4ed6552f | 37 | } |
9996cea7 TK |
38 | } |
39 | ||
358c7789 PD |
40 | void lmb_dump_all_force(struct lmb *lmb) |
41 | { | |
42 | printf("lmb_dump_all:\n"); | |
43 | lmb_dump_region(&lmb->memory, "memory"); | |
44 | lmb_dump_region(&lmb->reserved, "reserved"); | |
45 | } | |
46 | ||
9996cea7 TK |
/* Dump both region tables, but only in DEBUG builds; a no-op otherwise */
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}
53 | ||
e35d2a75 SG |
54 | static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1, |
55 | phys_addr_t base2, phys_size_t size2) | |
4ed6552f | 56 | { |
d67f33cf SG |
57 | const phys_addr_t base1_end = base1 + size1 - 1; |
58 | const phys_addr_t base2_end = base2 + size2 - 1; | |
59 | ||
60 | return ((base1 <= base2_end) && (base2 <= base1_end)); | |
4ed6552f KG |
61 | } |
62 | ||
391fd93a | 63 | static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1, |
e35d2a75 | 64 | phys_addr_t base2, phys_size_t size2) |
4ed6552f KG |
65 | { |
66 | if (base2 == base1 + size1) | |
67 | return 1; | |
68 | else if (base1 == base2 + size2) | |
69 | return -1; | |
70 | ||
71 | return 0; | |
72 | } | |
73 | ||
e35d2a75 SG |
74 | static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, |
75 | unsigned long r2) | |
4ed6552f | 76 | { |
391fd93a BB |
77 | phys_addr_t base1 = rgn->region[r1].base; |
78 | phys_size_t size1 = rgn->region[r1].size; | |
79 | phys_addr_t base2 = rgn->region[r2].base; | |
80 | phys_size_t size2 = rgn->region[r2].size; | |
4ed6552f KG |
81 | |
82 | return lmb_addrs_adjacent(base1, size1, base2, size2); | |
83 | } | |
84 | ||
85 | static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) | |
86 | { | |
87 | unsigned long i; | |
88 | ||
89 | for (i = r; i < rgn->cnt - 1; i++) { | |
90 | rgn->region[i].base = rgn->region[i + 1].base; | |
91 | rgn->region[i].size = rgn->region[i + 1].size; | |
59c0ea5d | 92 | rgn->region[i].flags = rgn->region[i + 1].flags; |
4ed6552f KG |
93 | } |
94 | rgn->cnt--; | |
95 | } | |
96 | ||
97 | /* Assumption: base addr of region 1 < base addr of region 2 */ | |
e35d2a75 SG |
98 | static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, |
99 | unsigned long r2) | |
4ed6552f KG |
100 | { |
101 | rgn->region[r1].size += rgn->region[r2].size; | |
102 | lmb_remove_region(rgn, r2); | |
103 | } | |
104 | ||
105 | void lmb_init(struct lmb *lmb) | |
106 | { | |
6d66502b | 107 | #if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS) |
4fa0150d PD |
108 | lmb->memory.max = CONFIG_LMB_MAX_REGIONS; |
109 | lmb->reserved.max = CONFIG_LMB_MAX_REGIONS; | |
6d66502b PD |
110 | #else |
111 | lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS; | |
112 | lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS; | |
113 | lmb->memory.region = lmb->memory_regions; | |
114 | lmb->reserved.region = lmb->reserved_regions; | |
115 | #endif | |
d67f33cf | 116 | lmb->memory.cnt = 0; |
d67f33cf | 117 | lmb->reserved.cnt = 0; |
4ed6552f KG |
118 | } |
119 | ||
1274698d MV |
120 | void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align) |
121 | { | |
122 | ulong bank_end; | |
123 | int bank; | |
124 | ||
125 | /* | |
126 | * Reserve memory from aligned address below the bottom of U-Boot stack | |
127 | * until end of U-Boot area using LMB to prevent U-Boot from overwriting | |
128 | * that memory. | |
129 | */ | |
130 | debug("## Current stack ends at 0x%08lx ", sp); | |
131 | ||
132 | /* adjust sp by 4K to be safe */ | |
133 | sp -= align; | |
134 | for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) { | |
135 | if (!gd->bd->bi_dram[bank].size || | |
136 | sp < gd->bd->bi_dram[bank].start) | |
137 | continue; | |
138 | /* Watch out for RAM at end of address space! */ | |
139 | bank_end = gd->bd->bi_dram[bank].start + | |
140 | gd->bd->bi_dram[bank].size - 1; | |
141 | if (sp > bank_end) | |
142 | continue; | |
143 | if (bank_end > end) | |
144 | bank_end = end - 1; | |
145 | ||
146 | lmb_reserve(lmb, sp, bank_end - sp + 1); | |
147 | break; | |
148 | } | |
149 | } | |
150 | ||
9cc2323f | 151 | static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob) |
aa3c609e | 152 | { |
aa3c609e SG |
153 | arch_lmb_reserve(lmb); |
154 | board_lmb_reserve(lmb); | |
155 | ||
0c303f9a | 156 | if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob) |
aa3c609e SG |
157 | boot_fdt_add_mem_rsv_regions(lmb, fdt_blob); |
158 | } | |
159 | ||
9cc2323f | 160 | /* Initialize the struct, add memory and call arch/board reserve functions */ |
b75d8dc5 | 161 | void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob) |
9cc2323f | 162 | { |
9cc2323f | 163 | int i; |
9cc2323f SG |
164 | |
165 | lmb_init(lmb); | |
dfaf6a57 | 166 | |
9cc2323f SG |
167 | for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { |
168 | if (bd->bi_dram[i].size) { | |
169 | lmb_add(lmb, bd->bi_dram[i].start, | |
170 | bd->bi_dram[i].size); | |
171 | } | |
172 | } | |
dfaf6a57 | 173 | |
9cc2323f SG |
174 | lmb_reserve_common(lmb, fdt_blob); |
175 | } | |
176 | ||
177 | /* Initialize the struct, add memory and call arch/board reserve functions */ | |
178 | void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base, | |
179 | phys_size_t size, void *fdt_blob) | |
180 | { | |
181 | lmb_init(lmb); | |
182 | lmb_add(lmb, base, size); | |
183 | lmb_reserve_common(lmb, fdt_blob); | |
184 | } | |
185 | ||
4ed6552f | 186 | /* This routine called with relocation disabled. */ |
59c0ea5d PD |
187 | static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base, |
188 | phys_size_t size, enum lmb_flags flags) | |
4ed6552f KG |
189 | { |
190 | unsigned long coalesced = 0; | |
191 | long adjacent, i; | |
192 | ||
d67f33cf | 193 | if (rgn->cnt == 0) { |
4ed6552f KG |
194 | rgn->region[0].base = base; |
195 | rgn->region[0].size = size; | |
59c0ea5d | 196 | rgn->region[0].flags = flags; |
d67f33cf | 197 | rgn->cnt = 1; |
4ed6552f KG |
198 | return 0; |
199 | } | |
200 | ||
201 | /* First try and coalesce this LMB with another. */ | |
e35d2a75 | 202 | for (i = 0; i < rgn->cnt; i++) { |
391fd93a BB |
203 | phys_addr_t rgnbase = rgn->region[i].base; |
204 | phys_size_t rgnsize = rgn->region[i].size; | |
59c0ea5d PD |
205 | phys_size_t rgnflags = rgn->region[i].flags; |
206 | ||
207 | if (rgnbase == base && rgnsize == size) { | |
208 | if (flags == rgnflags) | |
209 | /* Already have this region, so we're done */ | |
210 | return 0; | |
211 | else | |
212 | return -1; /* regions with new flags */ | |
213 | } | |
4ed6552f | 214 | |
e35d2a75 SG |
215 | adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); |
216 | if (adjacent > 0) { | |
59c0ea5d PD |
217 | if (flags != rgnflags) |
218 | break; | |
4ed6552f KG |
219 | rgn->region[i].base -= size; |
220 | rgn->region[i].size += size; | |
221 | coalesced++; | |
222 | break; | |
e35d2a75 | 223 | } else if (adjacent < 0) { |
59c0ea5d PD |
224 | if (flags != rgnflags) |
225 | break; | |
4ed6552f KG |
226 | rgn->region[i].size += size; |
227 | coalesced++; | |
228 | break; | |
0f7c51a6 SG |
229 | } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { |
230 | /* regions overlap */ | |
231 | return -1; | |
4ed6552f KG |
232 | } |
233 | } | |
234 | ||
e35d2a75 | 235 | if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) { |
59c0ea5d PD |
236 | if (rgn->region[i].flags == rgn->region[i + 1].flags) { |
237 | lmb_coalesce_regions(rgn, i, i + 1); | |
238 | coalesced++; | |
239 | } | |
4ed6552f KG |
240 | } |
241 | ||
242 | if (coalesced) | |
243 | return coalesced; | |
00fd8dad | 244 | if (rgn->cnt >= rgn->max) |
4ed6552f KG |
245 | return -1; |
246 | ||
247 | /* Couldn't coalesce the LMB, so add it to the sorted table. */ | |
248 | for (i = rgn->cnt-1; i >= 0; i--) { | |
249 | if (base < rgn->region[i].base) { | |
e35d2a75 SG |
250 | rgn->region[i + 1].base = rgn->region[i].base; |
251 | rgn->region[i + 1].size = rgn->region[i].size; | |
59c0ea5d | 252 | rgn->region[i + 1].flags = rgn->region[i].flags; |
4ed6552f | 253 | } else { |
e35d2a75 SG |
254 | rgn->region[i + 1].base = base; |
255 | rgn->region[i + 1].size = size; | |
59c0ea5d | 256 | rgn->region[i + 1].flags = flags; |
4ed6552f KG |
257 | break; |
258 | } | |
259 | } | |
260 | ||
261 | if (base < rgn->region[0].base) { | |
262 | rgn->region[0].base = base; | |
263 | rgn->region[0].size = size; | |
59c0ea5d | 264 | rgn->region[0].flags = flags; |
4ed6552f KG |
265 | } |
266 | ||
267 | rgn->cnt++; | |
268 | ||
269 | return 0; | |
270 | } | |
271 | ||
59c0ea5d PD |
272 | static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, |
273 | phys_size_t size) | |
274 | { | |
275 | return lmb_add_region_flags(rgn, base, size, LMB_NONE); | |
276 | } | |
277 | ||
4ed6552f | 278 | /* This routine may be called with relocation disabled. */ |
391fd93a | 279 | long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size) |
4ed6552f KG |
280 | { |
281 | struct lmb_region *_rgn = &(lmb->memory); | |
282 | ||
283 | return lmb_add_region(_rgn, base, size); | |
284 | } | |
285 | ||
98874ff3 | 286 | long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size) |
63796c4e AF |
287 | { |
288 | struct lmb_region *rgn = &(lmb->reserved); | |
98874ff3 | 289 | phys_addr_t rgnbegin, rgnend; |
d67f33cf | 290 | phys_addr_t end = base + size - 1; |
63796c4e AF |
291 | int i; |
292 | ||
293 | rgnbegin = rgnend = 0; /* supress gcc warnings */ | |
294 | ||
295 | /* Find the region where (base, size) belongs to */ | |
e35d2a75 | 296 | for (i = 0; i < rgn->cnt; i++) { |
63796c4e | 297 | rgnbegin = rgn->region[i].base; |
d67f33cf | 298 | rgnend = rgnbegin + rgn->region[i].size - 1; |
63796c4e AF |
299 | |
300 | if ((rgnbegin <= base) && (end <= rgnend)) | |
301 | break; | |
302 | } | |
303 | ||
304 | /* Didn't find the region */ | |
305 | if (i == rgn->cnt) | |
306 | return -1; | |
307 | ||
308 | /* Check to see if we are removing entire region */ | |
309 | if ((rgnbegin == base) && (rgnend == end)) { | |
310 | lmb_remove_region(rgn, i); | |
311 | return 0; | |
312 | } | |
313 | ||
314 | /* Check to see if region is matching at the front */ | |
315 | if (rgnbegin == base) { | |
d67f33cf | 316 | rgn->region[i].base = end + 1; |
63796c4e AF |
317 | rgn->region[i].size -= size; |
318 | return 0; | |
319 | } | |
320 | ||
321 | /* Check to see if the region is matching at the end */ | |
322 | if (rgnend == end) { | |
323 | rgn->region[i].size -= size; | |
324 | return 0; | |
325 | } | |
326 | ||
327 | /* | |
328 | * We need to split the entry - adjust the current one to the | |
329 | * beginging of the hole and add the region after hole. | |
330 | */ | |
331 | rgn->region[i].size = base - rgn->region[i].base; | |
59c0ea5d PD |
332 | return lmb_add_region_flags(rgn, end + 1, rgnend - end, |
333 | rgn->region[i].flags); | |
63796c4e AF |
334 | } |
335 | ||
59c0ea5d PD |
336 | long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size, |
337 | enum lmb_flags flags) | |
4ed6552f KG |
338 | { |
339 | struct lmb_region *_rgn = &(lmb->reserved); | |
340 | ||
59c0ea5d PD |
341 | return lmb_add_region_flags(_rgn, base, size, flags); |
342 | } | |
343 | ||
344 | long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size) | |
345 | { | |
346 | return lmb_reserve_flags(lmb, base, size, LMB_NONE); | |
4ed6552f KG |
347 | } |
348 | ||
750a6ff4 | 349 | static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base, |
391fd93a | 350 | phys_size_t size) |
4ed6552f KG |
351 | { |
352 | unsigned long i; | |
353 | ||
e35d2a75 | 354 | for (i = 0; i < rgn->cnt; i++) { |
391fd93a BB |
355 | phys_addr_t rgnbase = rgn->region[i].base; |
356 | phys_size_t rgnsize = rgn->region[i].size; | |
e35d2a75 | 357 | if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) |
4ed6552f | 358 | break; |
4ed6552f KG |
359 | } |
360 | ||
361 | return (i < rgn->cnt) ? i : -1; | |
362 | } | |
363 | ||
391fd93a | 364 | phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align) |
4ed6552f KG |
365 | { |
366 | return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE); | |
367 | } | |
368 | ||
391fd93a | 369 | phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr) |
4ed6552f | 370 | { |
391fd93a | 371 | phys_addr_t alloc; |
4ed6552f KG |
372 | |
373 | alloc = __lmb_alloc_base(lmb, size, align, max_addr); | |
374 | ||
375 | if (alloc == 0) | |
376 | printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n", | |
e35d2a75 | 377 | (ulong)size, (ulong)max_addr); |
4ed6552f KG |
378 | |
379 | return alloc; | |
380 | } | |
381 | ||
391fd93a | 382 | static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size) |
4ed6552f KG |
383 | { |
384 | return addr & ~(size - 1); | |
385 | } | |
386 | ||
391fd93a | 387 | phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr) |
4ed6552f | 388 | { |
e35d2a75 | 389 | long i, rgn; |
391fd93a | 390 | phys_addr_t base = 0; |
7570a994 | 391 | phys_addr_t res_base; |
4ed6552f | 392 | |
e35d2a75 | 393 | for (i = lmb->memory.cnt - 1; i >= 0; i--) { |
391fd93a BB |
394 | phys_addr_t lmbbase = lmb->memory.region[i].base; |
395 | phys_size_t lmbsize = lmb->memory.region[i].size; | |
4ed6552f | 396 | |
7570a994 AF |
397 | if (lmbsize < size) |
398 | continue; | |
4ed6552f KG |
399 | if (max_addr == LMB_ALLOC_ANYWHERE) |
400 | base = lmb_align_down(lmbbase + lmbsize - size, align); | |
401 | else if (lmbbase < max_addr) { | |
ad3fda52 SW |
402 | base = lmbbase + lmbsize; |
403 | if (base < lmbbase) | |
404 | base = -1; | |
405 | base = min(base, max_addr); | |
4ed6552f KG |
406 | base = lmb_align_down(base - size, align); |
407 | } else | |
408 | continue; | |
409 | ||
7570a994 | 410 | while (base && lmbbase <= base) { |
e35d2a75 SG |
411 | rgn = lmb_overlaps_region(&lmb->reserved, base, size); |
412 | if (rgn < 0) { | |
7570a994 AF |
413 | /* This area isn't reserved, take it */ |
414 | if (lmb_add_region(&lmb->reserved, base, | |
0f7c51a6 | 415 | size) < 0) |
7570a994 AF |
416 | return 0; |
417 | return base; | |
418 | } | |
e35d2a75 | 419 | res_base = lmb->reserved.region[rgn].base; |
7570a994 AF |
420 | if (res_base < size) |
421 | break; | |
422 | base = lmb_align_down(res_base - size, align); | |
423 | } | |
4ed6552f | 424 | } |
7570a994 | 425 | return 0; |
4ed6552f KG |
426 | } |
427 | ||
4cc8af80 SG |
428 | /* |
429 | * Try to allocate a specific address range: must be in defined memory but not | |
430 | * reserved | |
431 | */ | |
432 | phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size) | |
433 | { | |
e35d2a75 | 434 | long rgn; |
4cc8af80 SG |
435 | |
436 | /* Check if the requested address is in one of the memory regions */ | |
e35d2a75 SG |
437 | rgn = lmb_overlaps_region(&lmb->memory, base, size); |
438 | if (rgn >= 0) { | |
4cc8af80 SG |
439 | /* |
440 | * Check if the requested end address is in the same memory | |
441 | * region we found. | |
442 | */ | |
e35d2a75 SG |
443 | if (lmb_addrs_overlap(lmb->memory.region[rgn].base, |
444 | lmb->memory.region[rgn].size, | |
445 | base + size - 1, 1)) { | |
4cc8af80 SG |
446 | /* ok, reserve the memory */ |
447 | if (lmb_reserve(lmb, base, size) >= 0) | |
448 | return base; | |
449 | } | |
450 | } | |
451 | return 0; | |
452 | } | |
453 | ||
454 | /* Return number of bytes from a given address that are free */ | |
65304aad | 455 | phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr) |
4cc8af80 SG |
456 | { |
457 | int i; | |
e35d2a75 | 458 | long rgn; |
4cc8af80 SG |
459 | |
460 | /* check if the requested address is in the memory regions */ | |
e35d2a75 SG |
461 | rgn = lmb_overlaps_region(&lmb->memory, addr, 1); |
462 | if (rgn >= 0) { | |
4cc8af80 SG |
463 | for (i = 0; i < lmb->reserved.cnt; i++) { |
464 | if (addr < lmb->reserved.region[i].base) { | |
465 | /* first reserved range > requested address */ | |
466 | return lmb->reserved.region[i].base - addr; | |
467 | } | |
468 | if (lmb->reserved.region[i].base + | |
469 | lmb->reserved.region[i].size > addr) { | |
470 | /* requested addr is in this reserved range */ | |
471 | return 0; | |
472 | } | |
473 | } | |
474 | /* if we come here: no reserved ranges above requested addr */ | |
475 | return lmb->memory.region[lmb->memory.cnt - 1].base + | |
476 | lmb->memory.region[lmb->memory.cnt - 1].size - addr; | |
477 | } | |
478 | return 0; | |
479 | } | |
480 | ||
e359a4a5 | 481 | int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags) |
4ed6552f KG |
482 | { |
483 | int i; | |
484 | ||
485 | for (i = 0; i < lmb->reserved.cnt; i++) { | |
391fd93a | 486 | phys_addr_t upper = lmb->reserved.region[i].base + |
4ed6552f KG |
487 | lmb->reserved.region[i].size - 1; |
488 | if ((addr >= lmb->reserved.region[i].base) && (addr <= upper)) | |
e359a4a5 | 489 | return (lmb->reserved.region[i].flags & flags) == flags; |
4ed6552f KG |
490 | } |
491 | return 0; | |
492 | } | |
a16028da | 493 | |
e359a4a5 PD |
494 | int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr) |
495 | { | |
496 | return lmb_is_reserved_flags(lmb, addr, LMB_NONE); | |
497 | } | |
498 | ||
2c34f3f5 | 499 | __weak void board_lmb_reserve(struct lmb *lmb) |
a16028da MF |
500 | { |
501 | /* please define platform specific board_lmb_reserve() */ | |
502 | } | |
a16028da | 503 | |
2c34f3f5 | 504 | __weak void arch_lmb_reserve(struct lmb *lmb) |
a16028da MF |
505 | { |
506 | /* please define platform specific arch_lmb_reserve() */ | |
507 | } |