]>
Commit | Line | Data |
---|---|---|
5d00995c AG |
1 | /* |
2 | * EFI application memory management | |
3 | * | |
4 | * Copyright (c) 2016 Alexander Graf | |
5 | * | |
6 | * SPDX-License-Identifier: GPL-2.0+ | |
7 | */ | |
8 | ||
5d00995c AG |
9 | #include <common.h> |
10 | #include <efi_loader.h> | |
11 | #include <malloc.h> | |
12 | #include <asm/global_data.h> | |
13 | #include <libfdt_env.h> | |
38ce65e1 | 14 | #include <linux/list_sort.h> |
5d00995c AG |
15 | #include <inttypes.h> |
16 | #include <watchdog.h> | |
17 | ||
18 | DECLARE_GLOBAL_DATA_PTR; | |
19 | ||
/* One entry of the global EFI memory map: list node plus descriptor */
struct efi_mem_list {
	struct list_head link;		/* chained into the efi_mem list */
	struct efi_mem_desc desc;	/* the EFI memory descriptor itself */
};
24 | ||
/*
 * Return codes of efi_mem_carve_out(); non-negative return values are
 * the number of pages carved out instead.
 */
#define EFI_CARVE_NO_OVERLAP -1
#define EFI_CARVE_LOOP_AGAIN -2
#define EFI_CARVE_OVERLAPS_NONRAM -3

/* This list contains all memory map items */
LIST_HEAD(efi_mem);

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
/* 32bit-addressable buffer for payloads with DMA-limited peripherals */
void *efi_bounce_buffer;
#endif
35 | ||
/*
 * U-Boot services each EFI AllocatePool request as a separate
 * (multiple) page allocation. We have to track the number of pages
 * to be able to free the correct amount later.
 * EFI requires 8 byte alignment for pool allocations, so we can
 * prepend each allocation with an 64 bit header tracking the
 * allocation size, and hand out the remainder to the caller.
 */
struct efi_pool_allocation {
	u64 num_pages;	/* pages backing this allocation, needed by free */
	/* Caller-visible payload; DMA-aligned so cache maintenance is safe */
	char data[] __aligned(ARCH_DMA_MINALIGN);
};
48 | ||
38ce65e1 AG |
49 | /* |
50 | * Sorts the memory list from highest address to lowest address | |
51 | * | |
52 | * When allocating memory we should always start from the highest | |
53 | * address chunk, so sort the memory list such that the first list | |
54 | * iterator gets the highest address and goes lower from there. | |
55 | */ | |
56 | static int efi_mem_cmp(void *priv, struct list_head *a, struct list_head *b) | |
57 | { | |
58 | struct efi_mem_list *mema = list_entry(a, struct efi_mem_list, link); | |
59 | struct efi_mem_list *memb = list_entry(b, struct efi_mem_list, link); | |
60 | ||
61 | if (mema->desc.physical_start == memb->desc.physical_start) | |
62 | return 0; | |
63 | else if (mema->desc.physical_start < memb->desc.physical_start) | |
64 | return 1; | |
65 | else | |
66 | return -1; | |
67 | } | |
68 | ||
/* Re-sort the global memory list into descending address order */
static void efi_mem_sort(void)
{
	list_sort(NULL, &efi_mem, efi_mem_cmp);
}
73 | ||
/*
 * Unmaps all memory occupied by the carve_desc region from the
 * list entry pointed to by map.
 *
 * Returns EFI_CARVE_NO_OVERLAP if the regions don't overlap.
 * Returns EFI_CARVE_OVERLAPS_NONRAM if the carve and map overlap,
 * and the map contains anything but free ram.
 * (only when overlap_only_ram is true)
 * Returns EFI_CARVE_LOOP_AGAIN if the mapping list should be traversed
 * again, as it has been altered
 * Returns the number of overlapping pages. The pages are removed from
 * the mapping list.
 *
 * In case of EFI_CARVE_OVERLAPS_NONRAM it is the callers responsibility
 * to readd the already carved out pages to the mapping.
 */
static int efi_mem_carve_out(struct efi_mem_list *map,
			     struct efi_mem_desc *carve_desc,
			     bool overlap_only_ram)
{
	struct efi_mem_list *newmap;
	struct efi_mem_desc *map_desc = &map->desc;
	uint64_t map_start = map_desc->physical_start;
	uint64_t map_end = map_start + (map_desc->num_pages << EFI_PAGE_SHIFT);
	uint64_t carve_start = carve_desc->physical_start;
	uint64_t carve_end = carve_start +
			     (carve_desc->num_pages << EFI_PAGE_SHIFT);

	/* check whether we're overlapping */
	if ((carve_end <= map_start) || (carve_start >= map_end))
		return EFI_CARVE_NO_OVERLAP;

	/* We're overlapping with non-RAM, warn the caller if desired */
	if (overlap_only_ram && (map_desc->type != EFI_CONVENTIONAL_MEMORY))
		return EFI_CARVE_OVERLAPS_NONRAM;

	/* Sanitize carve_start and carve_end to lie within our bounds */
	carve_start = max(carve_start, map_start);
	carve_end = min(carve_end, map_end);

	/* Carving at the beginning of our map? Just move it! */
	if (carve_start == map_start) {
		if (map_end == carve_end) {
			/* Full overlap, just remove map */
			list_del(&map->link);
			free(map);
		} else {
			/* Chop the carved pages off the front of the map */
			map->desc.physical_start = carve_end;
			map->desc.num_pages = (map_end - carve_end)
					      >> EFI_PAGE_SHIFT;
		}

		return (carve_end - carve_start) >> EFI_PAGE_SHIFT;
	}

	/*
	 * Overlapping maps, just split the list map at carve_start,
	 * it will get moved or removed in the next iteration.
	 *
	 * [ map_desc |__carve_start__| newmap ]
	 */

	/* Create a new map from [ carve_start ... map_end ] */
	newmap = calloc(1, sizeof(*newmap));
	/* NOTE(review): calloc result unchecked — NULL deref on OOM; verify */
	newmap->desc = map->desc;
	newmap->desc.physical_start = carve_start;
	newmap->desc.num_pages = (map_end - carve_start) >> EFI_PAGE_SHIFT;
	/* Insert before current entry (descending address order) */
	list_add_tail(&newmap->link, &map->link);

	/* Shrink the map to [ map_start ... carve_start ] */
	map_desc->num_pages = (carve_start - map_start) >> EFI_PAGE_SHIFT;

	return EFI_CARVE_LOOP_AGAIN;
}
149 | ||
150 | uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type, | |
151 | bool overlap_only_ram) | |
152 | { | |
153 | struct list_head *lhandle; | |
154 | struct efi_mem_list *newlist; | |
74c16acc AG |
155 | bool carve_again; |
156 | uint64_t carved_pages = 0; | |
5d00995c | 157 | |
c933ed94 AF |
158 | debug("%s: 0x%" PRIx64 " 0x%" PRIx64 " %d %s\n", __func__, |
159 | start, pages, memory_type, overlap_only_ram ? "yes" : "no"); | |
160 | ||
5d00995c AG |
161 | if (!pages) |
162 | return start; | |
163 | ||
164 | newlist = calloc(1, sizeof(*newlist)); | |
165 | newlist->desc.type = memory_type; | |
166 | newlist->desc.physical_start = start; | |
167 | newlist->desc.virtual_start = start; | |
168 | newlist->desc.num_pages = pages; | |
169 | ||
170 | switch (memory_type) { | |
171 | case EFI_RUNTIME_SERVICES_CODE: | |
172 | case EFI_RUNTIME_SERVICES_DATA: | |
173 | newlist->desc.attribute = (1 << EFI_MEMORY_WB_SHIFT) | | |
174 | (1ULL << EFI_MEMORY_RUNTIME_SHIFT); | |
175 | break; | |
176 | case EFI_MMAP_IO: | |
177 | newlist->desc.attribute = 1ULL << EFI_MEMORY_RUNTIME_SHIFT; | |
178 | break; | |
179 | default: | |
180 | newlist->desc.attribute = 1 << EFI_MEMORY_WB_SHIFT; | |
181 | break; | |
182 | } | |
183 | ||
184 | /* Add our new map */ | |
185 | do { | |
74c16acc | 186 | carve_again = false; |
5d00995c AG |
187 | list_for_each(lhandle, &efi_mem) { |
188 | struct efi_mem_list *lmem; | |
189 | int r; | |
190 | ||
191 | lmem = list_entry(lhandle, struct efi_mem_list, link); | |
192 | r = efi_mem_carve_out(lmem, &newlist->desc, | |
193 | overlap_only_ram); | |
74c16acc AG |
194 | switch (r) { |
195 | case EFI_CARVE_OVERLAPS_NONRAM: | |
196 | /* | |
197 | * The user requested to only have RAM overlaps, | |
198 | * but we hit a non-RAM region. Error out. | |
199 | */ | |
5d00995c | 200 | return 0; |
74c16acc AG |
201 | case EFI_CARVE_NO_OVERLAP: |
202 | /* Just ignore this list entry */ | |
203 | break; | |
204 | case EFI_CARVE_LOOP_AGAIN: | |
205 | /* | |
206 | * We split an entry, but need to loop through | |
207 | * the list again to actually carve it. | |
208 | */ | |
209 | carve_again = true; | |
210 | break; | |
211 | default: | |
212 | /* We carved a number of pages */ | |
213 | carved_pages += r; | |
214 | carve_again = true; | |
215 | break; | |
216 | } | |
217 | ||
218 | if (carve_again) { | |
219 | /* The list changed, we need to start over */ | |
5d00995c AG |
220 | break; |
221 | } | |
222 | } | |
74c16acc AG |
223 | } while (carve_again); |
224 | ||
225 | if (overlap_only_ram && (carved_pages != pages)) { | |
226 | /* | |
227 | * The payload wanted to have RAM overlaps, but we overlapped | |
228 | * with an unallocated region. Error out. | |
229 | */ | |
230 | return 0; | |
231 | } | |
5d00995c AG |
232 | |
233 | /* Add our new map */ | |
234 | list_add_tail(&newlist->link, &efi_mem); | |
235 | ||
38ce65e1 AG |
236 | /* And make sure memory is listed in descending order */ |
237 | efi_mem_sort(); | |
238 | ||
5d00995c AG |
239 | return start; |
240 | } | |
241 | ||
242 | static uint64_t efi_find_free_memory(uint64_t len, uint64_t max_addr) | |
243 | { | |
244 | struct list_head *lhandle; | |
245 | ||
246 | list_for_each(lhandle, &efi_mem) { | |
247 | struct efi_mem_list *lmem = list_entry(lhandle, | |
248 | struct efi_mem_list, link); | |
249 | struct efi_mem_desc *desc = &lmem->desc; | |
250 | uint64_t desc_len = desc->num_pages << EFI_PAGE_SHIFT; | |
251 | uint64_t desc_end = desc->physical_start + desc_len; | |
252 | uint64_t curmax = min(max_addr, desc_end); | |
253 | uint64_t ret = curmax - len; | |
254 | ||
255 | /* We only take memory from free RAM */ | |
256 | if (desc->type != EFI_CONVENTIONAL_MEMORY) | |
257 | continue; | |
258 | ||
259 | /* Out of bounds for max_addr */ | |
260 | if ((ret + len) > max_addr) | |
261 | continue; | |
262 | ||
263 | /* Out of bounds for upper map limit */ | |
264 | if ((ret + len) > desc_end) | |
265 | continue; | |
266 | ||
267 | /* Out of bounds for lower map limit */ | |
268 | if (ret < desc->physical_start) | |
269 | continue; | |
270 | ||
271 | /* Return the highest address in this map within bounds */ | |
272 | return ret; | |
273 | } | |
274 | ||
275 | return 0; | |
276 | } | |
277 | ||
278 | efi_status_t efi_allocate_pages(int type, int memory_type, | |
f5a2a938 | 279 | efi_uintn_t pages, uint64_t *memory) |
5d00995c AG |
280 | { |
281 | u64 len = pages << EFI_PAGE_SHIFT; | |
282 | efi_status_t r = EFI_SUCCESS; | |
283 | uint64_t addr; | |
284 | ||
285 | switch (type) { | |
286 | case 0: | |
287 | /* Any page */ | |
dede284d | 288 | addr = efi_find_free_memory(len, gd->start_addr_sp); |
5d00995c AG |
289 | if (!addr) { |
290 | r = EFI_NOT_FOUND; | |
291 | break; | |
292 | } | |
293 | break; | |
294 | case 1: | |
295 | /* Max address */ | |
296 | addr = efi_find_free_memory(len, *memory); | |
297 | if (!addr) { | |
298 | r = EFI_NOT_FOUND; | |
299 | break; | |
300 | } | |
301 | break; | |
302 | case 2: | |
303 | /* Exact address, reserve it. The addr is already in *memory. */ | |
304 | addr = *memory; | |
305 | break; | |
306 | default: | |
307 | /* UEFI doesn't specify other allocation types */ | |
308 | r = EFI_INVALID_PARAMETER; | |
309 | break; | |
310 | } | |
311 | ||
312 | if (r == EFI_SUCCESS) { | |
313 | uint64_t ret; | |
314 | ||
315 | /* Reserve that map in our memory maps */ | |
316 | ret = efi_add_memory_map(addr, pages, memory_type, true); | |
317 | if (ret == addr) { | |
318 | *memory = addr; | |
319 | } else { | |
320 | /* Map would overlap, bail out */ | |
321 | r = EFI_OUT_OF_RESOURCES; | |
322 | } | |
323 | } | |
324 | ||
325 | return r; | |
326 | } | |
327 | ||
328 | void *efi_alloc(uint64_t len, int memory_type) | |
329 | { | |
330 | uint64_t ret = 0; | |
331 | uint64_t pages = (len + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; | |
332 | efi_status_t r; | |
333 | ||
334 | r = efi_allocate_pages(0, memory_type, pages, &ret); | |
335 | if (r == EFI_SUCCESS) | |
336 | return (void*)(uintptr_t)ret; | |
337 | ||
338 | return NULL; | |
339 | } | |
340 | ||
f5a2a938 | 341 | efi_status_t efi_free_pages(uint64_t memory, efi_uintn_t pages) |
5d00995c | 342 | { |
b61d857b SB |
343 | uint64_t r = 0; |
344 | ||
345 | r = efi_add_memory_map(memory, pages, EFI_CONVENTIONAL_MEMORY, false); | |
346 | /* Merging of adjacent free regions is missing */ | |
347 | ||
348 | if (r == memory) | |
349 | return EFI_SUCCESS; | |
350 | ||
351 | return EFI_NOT_FOUND; | |
5d00995c AG |
352 | } |
353 | ||
f5a2a938 | 354 | efi_status_t efi_allocate_pool(int pool_type, efi_uintn_t size, |
ead1274b SB |
355 | void **buffer) |
356 | { | |
357 | efi_status_t r; | |
358 | efi_physical_addr_t t; | |
946160f3 RC |
359 | u64 num_pages = (size + sizeof(struct efi_pool_allocation) + |
360 | EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; | |
42417bc8 SB |
361 | |
362 | if (size == 0) { | |
363 | *buffer = NULL; | |
364 | return EFI_SUCCESS; | |
365 | } | |
ead1274b SB |
366 | |
367 | r = efi_allocate_pages(0, pool_type, num_pages, &t); | |
42417bc8 SB |
368 | |
369 | if (r == EFI_SUCCESS) { | |
370 | struct efi_pool_allocation *alloc = (void *)(uintptr_t)t; | |
371 | alloc->num_pages = num_pages; | |
372 | *buffer = alloc->data; | |
373 | } | |
374 | ||
375 | return r; | |
376 | } | |
377 | ||
378 | efi_status_t efi_free_pool(void *buffer) | |
379 | { | |
380 | efi_status_t r; | |
381 | struct efi_pool_allocation *alloc; | |
382 | ||
71275a3e HS |
383 | if (buffer == NULL) |
384 | return EFI_INVALID_PARAMETER; | |
385 | ||
42417bc8 SB |
386 | alloc = container_of(buffer, struct efi_pool_allocation, data); |
387 | /* Sanity check, was the supplied address returned by allocate_pool */ | |
388 | assert(((uintptr_t)alloc & EFI_PAGE_MASK) == 0); | |
389 | ||
390 | r = efi_free_pages((uintptr_t)alloc, alloc->num_pages); | |
ead1274b SB |
391 | |
392 | return r; | |
393 | } | |
394 | ||
/*
 * Copy the current memory map into a caller buffer (backs the
 * GetMemoryMap boot service).
 *
 * *memory_map_size is always updated to the required size, so a caller
 * that got EFI_BUFFER_TOO_SMALL knows how much space to provide on the
 * next attempt. Descriptors are written in ascending address order.
 *
 * Returns EFI_BUFFER_TOO_SMALL or EFI_SUCCESS.
 */
efi_status_t efi_get_memory_map(efi_uintn_t *memory_map_size,
				struct efi_mem_desc *memory_map,
				efi_uintn_t *map_key,
				efi_uintn_t *descriptor_size,
				uint32_t *descriptor_version)
{
	efi_uintn_t map_size = 0;
	int map_entries = 0;
	struct list_head *lhandle;
	efi_uintn_t provided_map_size = *memory_map_size;

	/* Count the list entries to size the output array */
	list_for_each(lhandle, &efi_mem)
		map_entries++;

	map_size = map_entries * sizeof(struct efi_mem_desc);

	/* Report the required size even when the buffer is too small */
	*memory_map_size = map_size;

	if (provided_map_size < map_size)
		return EFI_BUFFER_TOO_SMALL;

	if (descriptor_size)
		*descriptor_size = sizeof(struct efi_mem_desc);

	if (descriptor_version)
		*descriptor_version = EFI_MEMORY_DESCRIPTOR_VERSION;

	/* Copy list into array */
	if (memory_map) {
		/*
		 * The list is sorted in descending address order, so
		 * fill the array backwards to return ascending order.
		 */
		memory_map = &memory_map[map_entries - 1];
		list_for_each(lhandle, &efi_mem) {
			struct efi_mem_list *lmem;

			lmem = list_entry(lhandle, struct efi_mem_list, link);
			*memory_map = lmem->desc;
			memory_map--;
		}
	}

	/* Map keys are not implemented; hand out a constant */
	*map_key = 0;

	return EFI_SUCCESS;
}
439 | ||
42633745 | 440 | __weak void efi_add_known_memory(void) |
5d00995c | 441 | { |
5d00995c AG |
442 | int i; |
443 | ||
444 | /* Add RAM */ | |
445 | for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { | |
446 | u64 ram_start = gd->bd->bi_dram[i].start; | |
447 | u64 ram_size = gd->bd->bi_dram[i].size; | |
448 | u64 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK; | |
449 | u64 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; | |
450 | ||
451 | efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY, | |
452 | false); | |
453 | } | |
42633745 YS |
454 | } |
455 | ||
/*
 * Build the initial EFI memory map at boot.
 *
 * Registers all known RAM, then reserves the U-Boot image/stack region
 * and the EFI runtime services section so payloads cannot allocate
 * over them. Optionally sets aside a 64MB 32bit-addressable bounce
 * buffer for DMA-limited peripherals.
 *
 * Returns 0 on success, -1 if the bounce buffer could not be reserved.
 */
int efi_memory_init(void)
{
	unsigned long runtime_start, runtime_end, runtime_pages;
	unsigned long uboot_start, uboot_pages;
	unsigned long uboot_stack_size = 16 * 1024 * 1024;

	efi_add_known_memory();

	/* Add U-Boot */
	uboot_start = (gd->start_addr_sp - uboot_stack_size) & ~EFI_PAGE_MASK;
	uboot_pages = (gd->ram_top - uboot_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map(uboot_start, uboot_pages, EFI_LOADER_DATA, false);

	/* Add Runtime Services, bounded by linker-script symbols */
	runtime_start = (ulong)&__efi_runtime_start & ~EFI_PAGE_MASK;
	runtime_end = (ulong)&__efi_runtime_stop;
	/* Round the end up so the whole runtime section is covered */
	runtime_end = (runtime_end + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
	runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map(runtime_start, runtime_pages,
			   EFI_RUNTIME_SERVICES_CODE, false);

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
	/* Request a 32bit 64MB bounce buffer region */
	uint64_t efi_bounce_buffer_addr = 0xffffffff;

	if (efi_allocate_pages(1, EFI_LOADER_DATA,
			       (64 * 1024 * 1024) >> EFI_PAGE_SHIFT,
			       &efi_bounce_buffer_addr) != EFI_SUCCESS)
		return -1;

	efi_bounce_buffer = (void*)(uintptr_t)efi_bounce_buffer_addr;
#endif

	return 0;
}