// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_phys_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

static void __init efi_memmap_free(void)
{
	__efi_memmap_free(efi.memmap.phys_map,
			  efi.memmap.desc_size * efi.memmap.nr_map,
			  efi.memmap.flags);
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: EFI memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
			    struct efi_memory_map_data *data)
{
	/* Expect allocation parameters to be zero initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}
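
/*
 * Usage sketch (illustrative, not part of the original file): allocate room
 * for one extra descriptor.  Whether the buffer comes from memblock or from
 * the page allocator depends on how far into boot the caller is:
 *
 *	struct efi_memory_map_data data = {};
 *	int ret = efi_memmap_alloc(efi.memmap.nr_map + 1, &data);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, data.phys_map holds the new buffer and data.flags records
 * whether it must later be released via memblock or free_pages().
 */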

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map
 * @data: EFI memory map data
 *
 * This function takes care of figuring out which function to use to
 * map the EFI memory map in efi.memmap based on how far into the boot
 * we are.
 *
 * During bootup, EFI_MEMMAP_LATE in data->flags should be clear since we
 * only have access to the early_memremap*() functions as the vmalloc
 * space isn't set up. Once the kernel is fully booted we can fall back
 * to the more robust memremap*() API.
 *
 * Returns zero on success, a negative error code on failure.
 */
static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
	struct efi_memory_map map;
	phys_addr_t phys_map;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	phys_map = data->phys_map;

	if (data->flags & EFI_MEMMAP_LATE)
		map.map = memremap(phys_map, data->size, MEMREMAP_WB);
	else
		map.map = early_memremap(phys_map, data->size);

	if (!map.map) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}

	/* NOP if (data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) == 0 */
	efi_memmap_free();

	map.phys_map = data->phys_map;
	map.nr_map = data->size / data->desc_size;
	map.map_end = map.map + data->size;

	map.desc_version = data->desc_version;
	map.desc_size = data->desc_size;
	map.flags = data->flags;

	set_bit(EFI_MEMMAP, &efi.flags);

	efi.memmap = map;

	return 0;
}

/**
 * efi_memmap_init_early - Map the EFI memory map data structure
 * @data: EFI memory map data
 *
 * Use early_memremap() to map the passed in EFI memory map and assign
 * it to efi.memmap.
 */
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
	/* Cannot go backwards */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	data->flags = 0;
	return __efi_memmap_init(data);
}
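
/*
 * Illustrative sketch (not part of this file; the fw_* locals are assumed):
 * architecture setup code typically fills in the descriptor geometry reported
 * by the firmware or boot stub and calls this from its early EFI
 * initialisation path, roughly:
 *
 *	struct efi_memory_map_data data = {
 *		.phys_map	= fw_memmap_phys,
 *		.size		= fw_memmap_size,
 *		.desc_size	= fw_desc_size,
 *		.desc_version	= fw_desc_version,
 *	};
 *
 *	if (efi_memmap_init_early(&data) < 0)
 *		panic("Unable to map the EFI memory map\n");
 */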

void __init efi_memmap_unmap(void)
{
	if (!efi_enabled(EFI_MEMMAP))
		return;

	if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
		unsigned long size;

		size = efi.memmap.desc_size * efi.memmap.nr_map;
		early_memunmap(efi.memmap.map, size);
	} else {
		memunmap(efi.memmap.map);
	}

	efi.memmap.map = NULL;
	clear_bit(EFI_MEMMAP, &efi.flags);
}

/**
 * efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr: Physical address of the new EFI memory map
 * @size: Size in bytes of the new EFI memory map
 *
 * Set up a mapping of the EFI memory map using memremap(). This
 * function should only be called once the vmalloc space has been
 * set up and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
 * efi_memmap_init_early() to have already been called.
 *
 * The reason there are two EFI memmap initialisation functions
 * (efi_memmap_init_early() and this late version) is because the
 * early EFI memmap should be explicitly unmapped once EFI
 * initialisation is complete as the fixmap space used to map the EFI
 * memmap (via early_memremap()) is a scarce resource.
 *
 * This late mapping is intended to persist for the duration of
 * runtime so that things like efi_mem_desc_lookup() and
 * efi_mem_attributes() always work.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
	struct efi_memory_map_data data = {
		.phys_map = addr,
		.size = size,
		.flags = EFI_MEMMAP_LATE,
	};

	/* Did we forget to unmap the early EFI memmap? */
	WARN_ON(efi.memmap.map);

	/* Were we already called? */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	/*
	 * It makes no sense to allow callers to register different
	 * values for the following fields. Copy them out of the
	 * existing early EFI memmap.
	 */
	data.desc_version = efi.memmap.desc_version;
	data.desc_size = efi.memmap.desc_size;

	return __efi_memmap_init(&data);
}
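
/*
 * Usage sketch (illustrative, not taken from this file): once vmalloc is
 * available, arch code tears down the early mapping and re-establishes a
 * permanent one; "phys" is assumed to be the physical address recorded by
 * the early mapping:
 *
 *	unsigned long size = efi.memmap.nr_map * efi.memmap.desc_size;
 *	phys_addr_t phys = efi.memmap.phys_map;
 *
 *	efi_memmap_unmap();
 *	if (efi_memmap_init_late(phys, size))
 *		pr_err("Failed to remap late EFI memory map\n");
 */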

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: map allocation parameters (address, size, flags)
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	efi_memmap_unmap();

	return __efi_memmap_init(data);
}

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}
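
/*
 * Worked example (illustrative, not part of the original file): for a
 * descriptor covering physical addresses [0x1000, 0x4fff] (4 EFI pages),
 * the inclusive ranges below yield:
 *
 *	range [0x2000, 0x2fff] lies strictly inside  -> 2 extra entries (3 parts)
 *	range [0x0000, 0x2fff] overlaps the start    -> 1 extra entry  (2 parts)
 *	range [0x3000, 0x7fff] overlaps the end      -> 1 extra entry  (2 parts)
 *	range [0x0000, 0x7fff] covers the descriptor -> 0 extra entries
 */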

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
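
/*
 * Putting it together (illustrative sketch, not part of this file; loosely
 * modelled on how an early-boot caller might grow the map, with all local
 * names assumed): count the extra descriptors, allocate a new buffer, insert
 * the range, then install the result.
 *
 *	struct efi_mem_range mrange = { .range = r, .attribute = attr };
 *	struct efi_memory_map_data data = {};
 *	efi_memory_desc_t *md;
 *	void *new;
 *	int num = efi.memmap.nr_map;
 *
 *	for_each_efi_memory_desc(md)
 *		num += efi_memmap_split_count(md, &mrange.range);
 *
 *	if (efi_memmap_alloc(num, &data))
 *		return;
 *
 *	new = early_memremap(data.phys_map, data.size);
 *	if (!new)
 *		return;
 *
 *	efi_memmap_insert(&efi.memmap, new, &mrange);
 *	early_memunmap(new, data.size);
 *
 *	efi_memmap_install(&data);
 */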